diff --git "a/train.csv" "b/train.csv" --- "a/train.csv" +++ "b/train.csv" @@ -1,17 +1,31 @@ Endpoint,Description,Inputs,Output,Test_Code -/audit/rest/delete/{UUID}/,requesting to delete audit using invalid tokens,"{ +/audit/rest/delete/{UUID}/,requesting to delete audit without authorization,"{ audit_uuid = ""valid_audit_uuid"" }","{ ""status"" : 401, -""message"" : ""Invalid token"" -}","def test_audit_delete_with_invalid_token(invalid_exec_api): +""message"" : ""Authentication credentials were not provided."" +}","def test_audit_delete_without_authorization(anonymous_exec_api): """""" - deleting the audits with invalid token + deleting the audits without authorization """""" - r = invalid_exec_api.audit_delete(""valid-audit-uuid"", params={}) + r = anonymous_exec_api.audit_delete(""valid-audit-uuid"", params={}) result = r.json() test_assert.status(r, 401) - assert result['detail'] == ""Invalid token."" + assert result['detail'] == ""Authentication credentials were not provided."" +" +/audit/rest/delete/{UUID}/,requesting to delete audit using valid existing UUID.Check the user type before performing the operation.,,"{ +""status"" : 204, +""response"" : success +}","def test_audit_delete(run_api, audit_delete): + """""" + deleting the audits + """""" + r = audit_delete + if run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 204) + + elif run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) " /audit/rest/delete/{UUID}/,"requesting to delete audit using invalid UUID. Check the user type before performing the operation. 
","{ @@ -29,34 +43,65 @@ audit_uuid = ""invalid_audit_uuid"" elif run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: test_assert.status(r, 403) " -/audit/rest/delete/{UUID}/,requesting to delete audit using valid existing UUID.Check the user type before performing the operation.,,"{ -""status"" : 204, -""response"" : success -}","def test_audit_delete(run_api, audit_delete): +/audit/rest/delete/{UUID}/,requesting to delete audit using invalid tokens,"{ +audit_uuid = ""valid_audit_uuid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_audit_delete_with_invalid_token(invalid_exec_api): """""" - deleting the audits + deleting the audits with invalid token """""" - r = audit_delete - if run_api.user_type == USER_TYPE[""admin""]: - test_assert.status(r, 204) - - elif run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: - test_assert.status(r, 403) + r = invalid_exec_api.audit_delete(""valid-audit-uuid"", params={}) + result = r.json() + test_assert.status(r, 401) + assert result['detail'] == ""Invalid token."" " -/audit/rest/delete/{UUID}/,requesting to delete audit without authorization,"{ +/audit/rest/detail/{UUID}/,requesting to fetch audit details without authorization,"{ audit_uuid = ""valid_audit_uuid"" }","{ ""status"" : 401, -""message"" : ""Authentication credentials were not provided."" -}","def test_audit_delete_without_authorization(anonymous_exec_api): +""message"" : ""Authentication credentials were not provided"" +}","def test_audit_details_without_authorization(anonymous_exec_api): """""" - deleting the audits without authorization + Audit details without authorization """""" - r = anonymous_exec_api.audit_delete(""valid-audit-uuid"", params={}) + r = anonymous_exec_api.audit_details(""valid-audit-uuid"", params={}) result = r.json() test_assert.status(r, 401) assert result['detail'] == ""Authentication credentials were not provided."" " +/audit/rest/detail/{UUID}/,requesting to fetch 
audit details using invalid token,"{ +audit_uuid = ""valid_audit_uuid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_audit_details_with_invalid_token(invalid_exec_api): + """""" + Audit details with invalid token + """""" + r = invalid_exec_api.audit_details(""valid-audit-uuid"", params={}) + result = r.json() + test_assert.status(r, 401) + assert result['detail'] == ""Invalid token."" +" +/audit/rest/detail/{UUID}/,"requesting to fetch audit details using invalid id.Check the user type before performing the operation. +","{ +audit_uuid = ""invalid_audit_uuid"" +}","{ +""status"" : 400 / 404 / 403, +""response"" : Details +}","def test_audit_details_invalid_uuid(run_api): + """""" + Audit details of invalid audit + """""" + r = run_api.audit_details(""invalid-audit-uuid"", params={}) + if run_api.user_type == USER_TYPE[""admin""]: + status_code = r.status_code + assert status_code in [400, 404] + elif run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) +" /audit/rest/detail/{UUID}/,"requesting to fetch audit details for existing audit. Check the user type before performing the operation, only admin user type have the permission to perform such operations. ",,"{ ""status"" : 200 / 403, @@ -95,50 +140,45 @@ def test_audit_details_manager(skip_if_not_manager, run_api, admin_exec_api, cus uuid = r.json()[""results""][0]['uuid'] result = run_api.audit_details(uuid, params={}) test_assert.status(result, manager_rights_response(endpoint, manages_user=True))" -/audit/rest/detail/{UUID}/,"requesting to fetch audit details using invalid id.Check the user type before performing the operation. -","{ -audit_uuid = ""invalid_audit_uuid"" -}","{ -""status"" : 400 / 404 / 403, -""response"" : Details -}","def test_audit_details_invalid_uuid(run_api): +/audit/rest/list/,requesting to list audits. 
Check the user type before performing the operation.,,"{ +""status"" : 200 / 403, +""response"" : success +}","def test_audit_list(run_api, audit_list): """""" - Audit details of invalid audit + Audit list """""" - r = run_api.audit_details(""invalid-audit-uuid"", params={}) + template, r = audit_list if run_api.user_type == USER_TYPE[""admin""]: - status_code = r.status_code - assert status_code in [400, 404] + test_assert.status(r, template, ""audit_list"", ""obj_name"") + test_assert.status(r, 200) elif run_api.user_type == USER_TYPE[""non_admin""]: test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""manager""]: + test_assert.status(r, manager_rights_response(endpoint, manages_user=True)) " -/audit/rest/detail/{UUID}/,requesting to fetch audit details using invalid token,"{ -audit_uuid = ""valid_audit_uuid"" -}","{ +/audit/rest/list/,requesting to list audits without authorization,,"{ ""status"" : 401, -""message"" : ""Invalid token"" -}","def test_audit_details_with_invalid_token(invalid_exec_api): +""message"" : ""Authentication credentials were not provided."" +}","def test_audit_list_without_authorization(anonymous_exec_api): """""" - Audit details with invalid token + Audit list without authorization """""" - r = invalid_exec_api.audit_details(""valid-audit-uuid"", params={}) + r = anonymous_exec_api.audit_list() result = r.json() test_assert.status(r, 401) - assert result['detail'] == ""Invalid token."" + assert result['detail'] == ""Authentication credentials were not provided."" " -/audit/rest/detail/{UUID}/,requesting to fetch audit details without authorization,"{ -audit_uuid = ""valid_audit_uuid"" -}","{ +/audit/rest/list/,requesting to list audits with invalid token,,"{ ""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -}","def test_audit_details_without_authorization(anonymous_exec_api): +""message"" : ""Invalid token"" +}","def test_audit_list_with_invalid_token(invalid_exec_api): """""" - Audit 
details without authorization + Audit list with invalid token """""" - r = anonymous_exec_api.audit_details(""valid-audit-uuid"", params={}) + r = invalid_exec_api.audit_list() result = r.json() test_assert.status(r, 401) - assert result['detail'] == ""Authentication credentials were not provided."" + assert result['detail'] == ""Invalid token."" " /audit/rest/list/,"requesting filtered list of audits. Check the user type before performing the operation. ","{ @@ -173,61 +213,16 @@ def test_audit_list_filter(run_api, server_list): elif run_api.user_type == USER_TYPE[""manager""]: test_assert.status(r, manager_rights_response(endpoint, manages_user=True)) " -/audit/rest/list/,requesting to list audits. Check the user type before performing the operation.,,"{ -""status"" : 200 / 403, -""response"" : success -}","def test_audit_list(run_api, audit_list): +/config/rest/delete/,"requesting to delete the config values with valid data.Check the user type before performing the operation +",,"{ +""status"" : 403 +}","def test_config_delete(skip_if_admin, run_api, config_delete): """""" - Audit list + deleting the config values """""" - template, r = audit_list - if run_api.user_type == USER_TYPE[""admin""]: - test_assert.status(r, template, ""audit_list"", ""obj_name"") - test_assert.status(r, 200) - elif run_api.user_type == USER_TYPE[""non_admin""]: + params, r = config_delete + if run_api.user_type in [USER_TYPE[""manager""], USER_TYPE[""non_admin""]]: test_assert.status(r, 403) - elif run_api.user_type == USER_TYPE[""manager""]: - test_assert.status(r, manager_rights_response(endpoint, manages_user=True)) -" -/audit/rest/list/,requesting to list audits with invalid token,,"{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_audit_list_with_invalid_token(invalid_exec_api): - """""" - Audit list with invalid token - """""" - r = invalid_exec_api.audit_list() - result = r.json() - test_assert.status(r, 401) - assert result['detail'] == ""Invalid token."" -" 
-/audit/rest/list/,requesting to list audits without authorization,,"{ -""status"" : 401, -""message"" : ""Authentication credentials were not provided."" -}","def test_audit_list_without_authorization(anonymous_exec_api): - """""" - Audit list without authorization - """""" - r = anonymous_exec_api.audit_list() - result = r.json() - test_assert.status(r, 401) - assert result['detail'] == ""Authentication credentials were not provided."" -" -/config/rest/delete/,requesting to delete the config values when provided valid config name but without authorization,"config_value = { ""name"": ""primary_server"" }","{ - ""status"": 401, - ""message"": ""Authentication credentials were not provided"" -} -","def test_config_delete_without_authorization(anonymous_exec_api): - """""" - deleting the non deletable config values without authorization - """""" - config_value = { - ""name"": ""primary_server"", - } - r = anonymous_exec_api.config_delete(config_value) - test_assert.status(r, 401) - result = r.json() - assert result['detail'] == ""Authentication credentials were not provided."" " /config/rest/delete/,"requesting to delete the config values when provided valid config name which can not be deleted(name='primary_server'). Check the user type before performing the operation. 
","config_value = { ""name"": ""primary_server"" }","{ @@ -247,16 +242,21 @@ def test_config_delete_non_deletable(run_api): elif run_api.user_type == USER_TYPE['admin']: test_assert.status(r, 400) " -/config/rest/delete/,"requesting to delete the config values with valid data.Check the user type before performing the operation -",,"{ -""status"" : 403 -}","def test_config_delete(skip_if_admin, run_api, config_delete): +/config/rest/delete/,requesting to delete the config values when provided valid config name but without authorization,"config_value = { ""name"": ""primary_server"" }","{ + ""status"": 401, + ""message"": ""Authentication credentials were not provided"" +} +","def test_config_delete_without_authorization(anonymous_exec_api): """""" - deleting the config values + deleting the non deletable config values without authorization """""" - params, r = config_delete - if run_api.user_type in [USER_TYPE[""manager""], USER_TYPE[""non_admin""]]: - test_assert.status(r, 403) + config_value = { + ""name"": ""primary_server"", + } + r = anonymous_exec_api.config_delete(config_value) + test_assert.status(r, 401) + result = r.json() + assert result['detail'] == ""Authentication credentials were not provided."" " /config/rest/delete/,"deleting config using invalid name value. Check the user type before performing the operation, only admin user type have the permission to perform such operations. ","config_value = { @@ -283,31 +283,45 @@ def test_config_delete_non_deletable(run_api): test_assert.status(r, 400) assert rjson['error'] == ""Config matching query does not exist."", ""|> Json %s"" % rjson " -/config/rest/get/,fetching the list of config values without Authorization,,"{ - ""status"": 401, - ""message"": ""Authentication credentials were not provided"" -} -","endpoint = ""config_get"" - -def test_config_get_without_authorization(anonymous_exec_api): +/config/rest/disable_ssl,"disabling the ssl in config. 
Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ + ""status"" : 400, + ""message"" : ""SSL is not enabled. So, can't disable it"" +}","def test_config_disable_ssl(skip_if_admin, run_api): """""" - Fetching the values of variables stored in db without authorization + config disable ssl """""" - r = anonymous_exec_api.config_get() - test_assert.status(r, 401) - result = r.json() - assert result['detail'] == ""Authentication credentials were not provided."" + r = run_api.config_disable_ssl() + rjson = r.json() + if run_api.user_type in [USER_TYPE[""manager""], USER_TYPE[""non_admin""]]: + test_assert.status(r, 403) + assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson + elif run_api.user_type == USER_TYPE['admin']: + test_assert.status(r, 400) + assert rjson['error'] == ""SSL is not enabled. So, can't disable it"", ""|> Json %s"" % rjson " -/config/rest/get/,"successfully fetching the list of config values.Check the user type before performing the operation, only admin user type have the permission to perform such operations. +/config/rest/enable_ssl/,"enabling the ssl in config. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+",,"{ + ""status"" : 400, + ""message"" : ""Upload SSL certificates, before enabling SSL"" +}","def test_config_enable_ssl(skip_if_admin, run_api): + """""" + config enable ssl + """""" + r = run_api.config_enable_ssl() + rjson = r.json() + if run_api.user_type in [USER_TYPE[""manager""], USER_TYPE[""non_admin""]]: + test_assert.status(r, 403) + assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson + elif run_api.user_type == USER_TYPE['admin']: + test_assert.status(r, 400) + assert rjson['error'] == ""Upload SSL certificates, before enabling SSL"", ""|> Json %s"" % rjson +" +/config/rest/get,"fetching the list of config values. Check the user type before performing the operation, only admin user type have the permission to perform such operations. ",,"{ ""status"": 200, - ""response"": Information of config list -} -"," -endpoint = ""config_get"" - - -def test_config_get(run_api, config_get): + ""response"": success +}","def test_config_get(run_api, config_get): """""" fetching the list of config values """""" @@ -318,21 +332,162 @@ def test_config_get(run_api, config_get): test_assert.status(r, 200) elif run_api.user_type == USER_TYPE[""manager""]: test_assert.status(r, manager_rights_response(endpoint)) +" +/config/rest/get,"fetching the list of config values without authorization. 
+",,"{ + ""status"" : 401, + ""message"" : ""Authentication credentials were not provided."" +}","def test_config_get_without_authorization(anonymous_exec_api): + """""" + Fetching the values of variables stored in db without authorization + """""" + r = anonymous_exec_api.config_get() + test_assert.status(r, 401) + result = r.json() + assert result['detail'] == ""Authentication credentials were not provided."" " -/config/rest/os-version/,"fetching the information of Os Version with invalid token +/config/rest/get,"fetching the list of config values using invalid token +",,"{ + ""status"" : 401, + ""message"" : ""Invalid"" +}","def test_config_get_with_invalid_token(invalid_exec_api): + """""" + Fetching the values of variables stored in db with invalid token + """""" + r = invalid_exec_api.config_get() + test_assert.status(r, 401) + result = r.json() + assert result['detail'] == ""Invalid token."" +" +/config/rest/get_google_auth_client_id/,getting the Google authorization client_id successfully.,,"{ + ""status"" : 200, + ""response"" : client_id provided +}","def test_get_google_auth_client_id(run_api): + """""" + get google auth client_id + """""" + r = run_api.get_google_auth_client_id() + rjson = r.json() + test_assert.status(r, 200) + assert rjson['name'] == ""client_id"", ""|> Json %s"" % rjson +" +/config/rest/get/,"successfully fetching the list of config values.Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+",,"{ + ""status"": 200, + ""response"": Information of config list +} +"," +def test_config_get(run_api, config_get): + """""" + fetching the list of config values + """""" + r = config_get + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 200) + elif run_api.user_type == USER_TYPE[""manager""]: + test_assert.status(r, manager_rights_response(endpoint)) + +" +/config/rest/get/,"fetching the list of config values. Check the user type before performing the operation, only admin user type have the permission to perform such operations. ",,"{ + ""status"" : 200, + +}","@pytest.mark.parametrize(""name"", CONFIG_GET, indirect=True) +def test_config_get_name(run_api, config_get_name, name): + """""" + Fetching the values of variables stored in db + """""" + params, r = config_get_name + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 200) + elif run_api.user_type == USER_TYPE[""manager""]: + test_assert.status(r, manager_rights_response(endpoint)) +" +/config/rest/get/,"fetching the list of config values without authorization +","{ + ""name"" :""HELLO"" +}","{ + ""status"" : 401, + ""message"" : ""Authentication credentials were not provided."" +}","def test_config_get_name_without_authorization(anonymous_exec_api): + """""" + Fetching the values of variables stored in db without authorization + """""" + r = anonymous_exec_api.config_get_name(name=""HELLO"") + test_assert.status(r, 401) + result = r.json() + assert result['detail'] == ""Authentication credentials were not provided."" + + +" +/config/rest/get/,fetching the list of config values without Authorization,,"{ ""status"": 401, - ""message"": ""Invalid token "" -}","def test_config_osversion_with_invalid_token(invalid_exec_api): + ""message"": ""Authentication credentials were not provided"" +} +"," +def 
test_config_get_without_authorization(anonymous_exec_api): """""" - Fetching the information of Os Version with invalid token + Fetching the values of variables stored in db without authorization """""" - r = invalid_exec_api.config_version() + r = anonymous_exec_api.config_get() + test_assert.status(r, 401) result = r.json() + assert result['detail'] == ""Authentication credentials were not provided."" +" +/config/rest/get/,"fetching the list of config values using invalid token +","{ + ""name"" :""HELLO"" +}","{ + ""status"" : 401, + ""message"" : ""Invalid token"" +}","def test_config_get_name_with_invalid_token(invalid_exec_api): + """""" + Fetching the values of variables stored in db with invalid token + """""" + r = invalid_exec_api.config_get_name(name=""HELLO"") test_assert.status(r, 401) + result = r.json() assert result['detail'] == ""Invalid token."" " +/config/rest/get/,"fetching the list of config values for invalid name. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ + ""name"" :""HELLO"" +}","{ + ""status"" : 400, + +}","def test_config_get_invalid_name(run_api): + """""" + Fetching the values of invalid variable stored in db + """""" + r = run_api.config_get_name(name=""HELLO"") + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 400) + elif run_api.user_type == USER_TYPE[""manager""]: + test_assert.status(r, manager_rights_response(endpoint)) +" +/config/rest/ldap/,"fetching the LDAP details of config. 
Check the user type before performing the operation, only admin user type have the permission to perform such operations.",,"{ + ""status"": 400, + ""message"": ""No such file or directory: 'colama/ldap.json'"" +}","def test_config_ldap_get(run_api): + """""" + get config ldap + """""" + r = run_api.config_ldap_get() + rjson = r.json() + if run_api.user_type in [USER_TYPE[""manager""], USER_TYPE[""non_admin""]]: + test_assert.status(r, 403) + assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson + elif run_api.user_type == USER_TYPE['admin']: + test_assert.status(r, 400) + assert rjson['error'] == ""[Errno 2] No such file or directory: 'colama/ldap.json'"", ""|> Json %s"" % rjson +" /config/rest/os-version/,successfully fetching the information of Os Version without Authorization,,"{ ""status"": 200, ""response"": Server version @@ -344,125 +499,384 @@ def test_config_get(run_api, config_get): r = anonymous_exec_api.config_version() test_assert.status(r, 200) " -/config/rest/set/,setting the config values when provided with invalid data.Check the user type before performing the operation.,"config_value = {""name"": ""protected_mode"", - ""value"": ""test"" - } +/config/rest/os-version/,"fetching the information of Os Version with invalid token +",,"{ + ""status"": 401, + ""message"": ""Invalid token "" +}","def test_config_osversion_with_invalid_token(invalid_exec_api): + """""" + Fetching the information of Os Version with invalid token + """""" + r = invalid_exec_api.config_version() + result = r.json() + test_assert.status(r, 401) + assert result['detail'] == ""Invalid token."" +" +/config/rest/set/,"setting the value of client_id config as ""None"".Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
","{ - ""status"": 400 / 403 -}","PARAMETERS = [{ - ""name"": ""protected_mode"", - ""value"": ""test""}, { - ""name"": ""Hello"", - ""value"": ""test"" - }, ] + ""name"": ""client_id"", + ""value"": None +}","{ +""status"" : 400, +""response"" : ""FAILURE"" +}","def test_config_set_None_client_id(run_api): + """""" + Set the client_id config value as None + """""" + config_value = { + ""name"": ""client_id"", + ""value"": None + } + r = run_api.config_set(config_value) + res = r.json() + if run_api.user_type in [USER_TYPE[""manager""], USER_TYPE[""non_admin""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE['admin']: + test_assert.status(r, 400) + assert res[""result""] == 'FAILURE' + assert 'Invalid Client_id Value' in res[""error""], res +" +/config/rest/set/,"setting the None value to secret config. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ + ""name"": ""secret"", + ""value"": None +}","{ +""status"" : 400, +""response"" : 'Invalid secret_key Value' +}","def test_config_None_set_secret(run_api): + """""" + Set the secret-key config value as None + """""" + config_value = { + ""name"": ""secret"", + ""value"": None + } + r = run_api.config_set(config_value) + res = r.json() + if run_api.user_type in [USER_TYPE[""manager""], USER_TYPE[""non_admin""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE['admin']: + test_assert.status(r, 400) + assert res[""result""] == 'FAILURE' + assert 'Invalid secret_key Value' in res[""error""], res -@pytest.mark.parametrize(""config_value"", PARAMETERS) -def test_config_set_invalid_data(run_api, config_value): + +" +/config/rest/set/,"setting the invalid name to config. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ + ""name"": ""client_id_invalid"" + }","{ +""status"" : 400, +""response"" : 'Not a supported config name' +}","def test_config_set_invalid_client_id(run_api): """""" - Set the config invalid values + Set the client_id config value, using invalid config key + """""" + config_value = { + ""name"": ""client_id_invalid"" + } + r = run_api.config_set(config_value) + res = r.json() + if run_api.user_type in [USER_TYPE[""manager""], USER_TYPE[""non_admin""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE['admin']: + test_assert.status(r, 400) + assert res[""result""] == 'FAILURE' + assert 'not a supported config name' in res[""error""], res +" +/config/rest/set/,"setting the empty value to secret config. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ + ""name"": ""secret"", + ""value"": """" +}","{ +""status"" : 200, +}","def test_config_set_secret_key(skip_if_admin, run_api): + """""" + Set the secret_key config value + """""" + config_value = { + ""name"": ""secret"", + ""value"": """" + } + r = run_api.config_set(config_value) + if run_api.user_type in [USER_TYPE[""manager""], USER_TYPE[""non_admin""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE['admin']: + test_assert.status(r, 200) +" +/config/rest/set/,"setting the config without the name parameter. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ + ""value"": ""password"" + }","{ + ""status"": 400, + ""message"":Please provide config name."" +} +","def test_config_set_without_name(run_api): """""" + config set without name + """""" + config_value = { + ""value"": ""password"" + } r = run_api.config_set(config_value) + rjson = r.json() if run_api.user_type in [USER_TYPE[""manager""], USER_TYPE[""non_admin""]]: test_assert.status(r, 403) + assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson elif run_api.user_type == USER_TYPE['admin']: test_assert.status(r, 400) + assert rjson['error'] == ""Please provide config name."", ""|> Json %s"" % rjson " -/config/rest/set/,setting the config values when provided with valid data but with invalid token,"config_value = {""name"": ""protected_mode"", +/config/rest/set/,setting the config values when provided with valid data but without Authorization,"config_value = {""name"": ""protected_mode"", ""value"": ""test"" } ","{ ""status"": 401, - ""message"": ""Invalid Token"" + ""message"": ""Authentication credentials were not provided"" } -","def test_config_set_with_invalid_token(invalid_exec_api): +","def test_config_set_without_authorization(anonymous_exec_api): """""" Set the config values without authorization """""" config_value = {""name"": ""protected_mode"", ""value"": ""test"" } - r = invalid_exec_api.config_set(config_value) + r = anonymous_exec_api.config_set(config_value) test_assert.status(r, 401) result = r.json() - assert result['detail'] == ""Invalid token."" + assert result['detail'] == ""Authentication credentials were not provided."" " -/config/rest/set/,setting the config values when provided with valid data but without Authorization,"config_value = {""name"": ""protected_mode"", +/config/rest/set/,setting the config values when provided with valid data but with invalid token,"config_value = {""name"": ""protected_mode"", ""value"": ""test"" } ","{ ""status"": 401, - ""message"": ""Authentication 
credentials were not provided"" + ""message"": ""Invalid Token"" } -","def test_config_set_without_authorization(anonymous_exec_api): +","def test_config_set_with_invalid_token(invalid_exec_api): """""" Set the config values without authorization """""" config_value = {""name"": ""protected_mode"", ""value"": ""test"" } - r = anonymous_exec_api.config_set(config_value) + r = invalid_exec_api.config_set(config_value) test_assert.status(r, 401) result = r.json() - assert result['detail'] == ""Authentication credentials were not provided."" + assert result['detail'] == ""Invalid token."" " -/config/rest/version/,requesting to get Version and Build Number of the product,,"{ - ""status"": 200, - ""response"": Version and build number -} -","def test_version_config(config_version): +/config/rest/set/,setting the config values when provided with invalid data.Check the user type before performing the operation.,"config_value = {""name"": ""protected_mode"", + ""value"": ""test"" + } +","{ + ""status"": 400 +}","PARAMETERS = [{ + ""name"": ""protected_mode"", + ""value"": ""test""}, { + ""name"": ""Hello"", + ""value"": ""test"" + }, ] + + +@pytest.mark.parametrize(""config_value"", PARAMETERS) +def test_config_set_invalid_data(run_api, config_value): """""" - Fetching the information of Version and Build Number + Set the config invalid values """""" - r = config_version - test_assert.status(r, 200) + r = run_api.config_set(config_value) + if run_api.user_type in [USER_TYPE[""manager""], USER_TYPE[""non_admin""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE['admin']: + test_assert.status(r, 400) + " -/config/rest/version/,requesting to get Version and Build Number of the product without Authorization,,"{ -""status"" : 200 -}","def test_version_config_without_authorization(anonymous_exec_api): - """""" - Fetching the information of Version and Build Number without authorization - """""" - r = anonymous_exec_api.config_version() 
+/config/rest/set/,"setting the config for non-editable keys. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ + ""name"": ""ssl_cert"", + ""value"": ""password"" +}","{ + ""status"": 400, + ""message"": ""This can't be used to set ['ssl_cert', 'ssl_key']"" +} +","def test_config_set_non_editable_keys(run_api): + """""" + config set non editable keys + """""" + config_value = { + ""name"": ""ssl_cert"", + ""value"": ""password"" + } + r = run_api.config_set(config_value) + rjson = r.json() + if run_api.user_type in [USER_TYPE[""manager""], USER_TYPE[""non_admin""]]: + test_assert.status(r, 403) + assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson + elif run_api.user_type == USER_TYPE['admin']: + test_assert.status(r, 400) + assert rjson['error'] == ""This can't be used to set ['ssl_cert', 'ssl_key']"", ""|> Json %s"" % rjson + +" +/config/rest/set/,setting the config for client_id,"{ + ""name"": ""client_id"", + ""value"": """" + }","{ + ""status"" :200 +}","def test_config_set_client_id(skip_if_admin, run_api): + """""" + Set the client_id config value + """""" + config_value = { + ""name"": ""client_id"", + ""value"": """" + } + r = run_api.config_set(config_value) + if run_api.user_type in [USER_TYPE[""manager""], USER_TYPE[""non_admin""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE['admin']: + test_assert.status(r, 200) + + +" +/config/rest/upload_ssl/,"uploading ssl for config without any file. 
Check the user type before performing the operation, only admin user type have the permission to perform such operations.",,"{ + ""status"": 400, + ""response"": ""No file was submitted"" +}","def test_config_upload_ssl_without_any_file(run_api): + """""" + config upload ssl without any file + """""" + r = run_api.config_upload_ssl() + rjson = r.json() + if run_api.user_type in [USER_TYPE[""manager""], USER_TYPE[""non_admin""]]: + test_assert.status(r, 403) + assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson + elif run_api.user_type == USER_TYPE['admin']: + test_assert.status(r, 400) + assert str(rjson['cert_file']) == ""['No file was submitted.']"", ""|> Json %s"" % rjson + assert str(rjson['key_file']) == ""['No file was submitted.']"", ""|> Json %s"" % rjson +" +/config/rest/version/,requesting to get Version and Build Number of the product without Authorization,,"{ +""status"" : 200 +}","def test_version_config_without_authorization(anonymous_exec_api): + """""" + Fetching the information of Version and Build Number without authorization + """""" + r = anonymous_exec_api.config_version() test_assert.status(r, 200) " -/deploy/rest/bulkops/,performing valid bulk operations on machines when requested with invalid token,"{ +/config/rest/version/,requesting to get Version and Build Number of the product,,"{ + ""status"": 200, + ""response"": Version and build number +} +","def test_version_config(config_version): + """""" + Fetching the information of Version and Build Number + """""" + r = config_version + test_assert.status(r, 200) +" +/deploy/rest/add-tags/,successfully adding tags to deployment provided that the number of machines is equal to the number of tags.,"{ +""machine_list"": [machine_id], +""tags_list"": [[tage_name]] +}","{ + ""status"": 201, +}","def test_deploy_add_tags(run_api, deploy_image): + """""" + Add tags when no of machines is equal to number of tags + """""" + x, r = deploy_image + 
machine_id = r.json()[""uuid""] + tage_name = ""random_tag"" + params = {""machine_list"": [machine_id], ""tags_list"": [[tage_name]]} + response = run_api.deploy_add_tags(params=params) + test_assert.status(response, 201) + machine_details = run_api.deploy_details(machine_id).json() + all_tags = [tags['value'] for tags in machine_details['tags']] + assert tage_name in all_tags, ""|> Json %s"" % all_tags +" +/deploy/rest/add-tags/,adding tags to deployment provided that the number of machines is less than the number of tags.,"{ +""machine_list"": [machine_id], +""tags_list"": [[""test_tag1""], [""test_tag2""]] +}","{ + ""status"": 400, + ""message"": ""Not enough machines to add tags"" +}","def test_deploy_add_tag_with_less_machine(run_api, deploy_image): + """""" + Add tags when no of machines is less than number of tags provided + """""" + x, r = deploy_image + machine_id = r.json()[""uuid""] + params = {""machine_list"": [machine_id], ""tags_list"": [[""test_tag1""], [""test_tag2""]]} + response = run_api.deploy_add_tags(params=params) + test_assert.status(response, 400) + assert response.json()['error'] == 'Not enough machines to add tags to.' 
+ +" +/deploy/rest/bulkops/,performing valid bulk operations on machines without Authorization,"{ deploy_list = [""invalid""] }","{ ""status"" : 401, -""message"" : ""Invalid token"" -}","def test_deploy_bulkops_invalid_token(invalid_exec_api): +""message"" : ""Authentication credentials were not provided"" +}","def test_deploy_bulkops_without_authorization(anonymous_exec_api): """""" - when requested with list of valid uuids with invalid token + when requested with list of valid uuid without authorization """""" deploy_list = [""invalid""] deploy = { ""machine_list"": deploy_list, ""op"": ""start"" } - depl_bulkops = invalid_exec_api.deploy_bulkops(deploy, wait=False) + depl_bulkops = anonymous_exec_api.deploy_bulkops(deploy, wait=False) depl_json = depl_bulkops.json() test_assert.status(depl_bulkops, 401) - assert depl_json[""detail""] == ""Invalid token."" + assert depl_json[""detail""] == ""Authentication credentials were not provided."" " -/deploy/rest/bulkops/,performing valid bulk operations on machines when requested with list of all invalid UUIDs,"{ - deploy_list = [""invalid""] -}","{ +/deploy/rest/bulkops/,performing valid bulk operations on machines when requested with list of valid machine UUIDs by manager who has rights over servers,,"{ +""status"" : 400 / 201 +}","PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_MANAGER_RIGHTS}] +PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_bulkops_delete_by_manager_with_server_rights(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + when requested with machines_list (all valid UUID) by manager who 
has rights over servers + """""" + # when manager have the rights on the server + deploy_id = custom_lib_non_admin_operations + params = { + ""machine_list"": [deploy_id], + ""op"": ""delete"" + } + ret = run_api.deploy_bulkops(params, wait=False) + test_assert.status(ret, 201) + + # when manager does not have the rights on the server + deploy_id = custom_lib_admin_operations + params = { + ""machine_list"": [deploy_id], + ""op"": ""delete"" + } + ret = run_api.deploy_bulkops(params, wait=False) + test_assert.status(ret, 400) +" +/deploy/rest/bulkops/,performing valid bulk operations on machines when requested with list of valid machine UUIDs by a manager who does not have rights over the servers,,"{ ""status"" : 400 -}","def test_deploy_bulkops_invalid_UUID(run_api): +}","PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_bulkops_by_manager(skip_if_not_manager, custom_lib_admin_operations, run_api): """""" - when requested with list of invalid uuid + when manager does not manage the user nor the server """""" - deploy_list = [""invalid""] - deploy = { - ""machine_list"": deploy_list, + deploy_id = custom_lib_admin_operations + params = { + ""machine_list"": [deploy_id], ""op"": ""start"" } - depl_bulkops = run_api.deploy_bulkops(deploy, wait=False) - depl_json = depl_bulkops.json() - test_assert.status(depl_bulkops, 400) - assert ""doesn't exist"" in depl_json[""failure""][0][""error""], depl_json + ret = run_api.deploy_bulkops(params, wait=False) + test_assert.status(ret, 400) " /deploy/rest/bulkops/,performing valid bulk operations on machines when requested with list of valid machine UUIDs,"{ deploy_list = [] @@ -498,97 +912,65 @@ def test_config_set_invalid_data(run_api, config_value): else: test_assert.status(r, 201) " -/deploy/rest/bulkops/,performing valid 
bulk operations on machines when requested with list of valid machine UUIDs by a manager who does not have rights over the servers,,"{ +/deploy/rest/bulkops/,performing valid bulk operations on machines when requested with list of all invalid UUIDs,"{ + deploy_list = [""invalid""] +}","{ ""status"" : 400 -}","PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] - -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) -def test_deploy_bulkops_by_manager(skip_if_not_manager, custom_lib_admin_operations, run_api): +}","def test_deploy_bulkops_invalid_UUID(run_api): """""" - when manager does not manage the user nor the server + when requested with list of invalid uuid """""" - deploy_id = custom_lib_admin_operations - params = { - ""machine_list"": [deploy_id], + deploy_list = [""invalid""] + deploy = { + ""machine_list"": deploy_list, ""op"": ""start"" } - ret = run_api.deploy_bulkops(params, wait=False) - test_assert.status(ret, 400) -" -/deploy/rest/bulkops/,performing valid bulk operations on machines when requested with list of valid machine UUIDs by manager who has rights over servers,,"{ -""status"" : 400 / 201 -}","PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_MANAGER_RIGHTS}] -PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] - -@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) -def test_deploy_bulkops_delete_by_manager_with_server_rights(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): - """""" - when requested with machines_list (all valid UUID) by manager who has rights over servers - """""" - # 
when manager have the rights on the server - deploy_id = custom_lib_non_admin_operations - params = { - ""machine_list"": [deploy_id], - ""op"": ""delete"" - } - ret = run_api.deploy_bulkops(params, wait=False) - test_assert.status(ret, 201) - - # when manager does not have the rights on the server - deploy_id = custom_lib_admin_operations - params = { - ""machine_list"": [deploy_id], - ""op"": ""delete"" - } - ret = run_api.deploy_bulkops(params, wait=False) - test_assert.status(ret, 400) + depl_bulkops = run_api.deploy_bulkops(deploy, wait=False) + depl_json = depl_bulkops.json() + test_assert.status(depl_bulkops, 400) + assert ""doesn't exist"" in depl_json[""failure""][0][""error""], depl_json " -/deploy/rest/bulkops/,performing valid bulk operations on machines without Authorization,"{ +/deploy/rest/bulkops/,performing valid bulk operations on machines when requested with invalid token,"{ deploy_list = [""invalid""] }","{ ""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -}","def test_deploy_bulkops_without_authorization(anonymous_exec_api): +""message"" : ""Invalid token"" +}","def test_deploy_bulkops_invalid_token(invalid_exec_api): """""" - when requested with list of valid uuid without authorization + when requested with list of valid uuids with invalid token """""" deploy_list = [""invalid""] deploy = { ""machine_list"": deploy_list, ""op"": ""start"" } - depl_bulkops = anonymous_exec_api.deploy_bulkops(deploy, wait=False) + depl_bulkops = invalid_exec_api.deploy_bulkops(deploy, wait=False) depl_json = depl_bulkops.json() test_assert.status(depl_bulkops, 401) - assert depl_json[""detail""] == ""Authentication credentials were not provided."" + assert depl_json[""detail""] == ""Invalid token."" " -/deploy/rest/bulkops/ ,performing invalid bulk operations on machines when requested with list of valid machine UUIDs,"{ - deploy_list = [] +/deploy/rest/bulkops/ ,performing invalid bulk operations on machines when requested with 
list of valid machine UUIDs but without Authorization,"{ + deploy_list = [""invalid""] } deploy = { ""machine_list"": deploy_list, ""op"": ""invalid"" }","{ -""status"" : 400 -}","def test_deploy_bulkops_invalid_ops(run_api, deploy_image): +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_deploy_bulkops_without_authorization(anonymous_exec_api): """""" - Bulk Operations on VM's with success and failed cases + Invalid bulk operations without authorization """""" - deploy_list = [] - params, r = deploy_image - x = r.json() - deploy_id = x[""UUID""] - deploy_list.append(deploy_id) + deploy_list = [""invalid""] deploy = { ""machine_list"": deploy_list, ""op"": ""invalid"" } - depl_bulkops = run_api.deploy_bulkops(deploy, wait=False) + depl_bulkops = anonymous_exec_api.deploy_bulkops(deploy, wait=False) depl_json = depl_bulkops.json() - test_assert.status(depl_bulkops, 400) - assert depl_json[""result""] == 'FAILURE', depl_json - assert 'Unsupported operation' in depl_json[""error""], depl_json + test_assert.status(depl_bulkops, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided."" " /deploy/rest/bulkops/ ,performing invalid bulk operations on machines when requested with list of valid machine UUIDs but invalid token,"{ deploy_list = [""invalid""] @@ -613,63 +995,96 @@ deploy = { test_assert.status(depl_bulkops, 401) assert depl_json[""detail""] == ""Invalid token."" " -/deploy/rest/bulkops/ ,performing invalid bulk operations on machines when requested with list of valid machine UUIDs but without Authorization,"{ - deploy_list = [""invalid""] +/deploy/rest/bulkops/ ,performing invalid bulk operations on machines when requested with list of valid machine UUIDs,"{ + deploy_list = [] } deploy = { ""machine_list"": deploy_list, ""op"": ""invalid"" }","{ -""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -}","def 
test_deploy_bulkops_without_authorization(anonymous_exec_api): +""status"" : 400 +}","def test_deploy_bulkops_invalid_ops(run_api, deploy_image): """""" - Invalid bulk operations without authorization + Bulk Operations on VM's with success and failed cases """""" - deploy_list = [""invalid""] + deploy_list = [] + params, r = deploy_image + x = r.json() + deploy_id = x[""UUID""] + deploy_list.append(deploy_id) deploy = { ""machine_list"": deploy_list, ""op"": ""invalid"" } - depl_bulkops = anonymous_exec_api.deploy_bulkops(deploy, wait=False) + depl_bulkops = run_api.deploy_bulkops(deploy, wait=False) depl_json = depl_bulkops.json() - test_assert.status(depl_bulkops, 401) - assert depl_json[""detail""] == ""Authentication credentials were not provided."" + test_assert.status(depl_bulkops, 400) + assert depl_json[""result""] == 'FAILURE', depl_json + assert 'Unsupported operation' in depl_json[""error""], depl_json " -/deploy/rest/crash/{{UUID}}/,crashing a deployed image for valid UUID,,"{ -""status"" : 201 -}","def test_deploy_crash(deploy_crash): - """""" - Crashing a Deployed Image - """""" - x, r = deploy_crash - test_assert.status(r, 201) +/deploy/rest/crash/{{UUID}}/,crashing a deployed image without Authorization,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_deploy_crash_without_authorization(anonymous_exec_api): + """""" + crashing a deployed image without authorization + """""" + deploy_id = ""invalid"" + depl_crash = anonymous_exec_api.deploy_crash(deploy_id, wait=False) + depl_json = depl_crash.json() + test_assert.status(depl_crash, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided."" " -/deploy/rest/crash/{{UUID}}/,crashing a deployed image for valid UUID by a non-admin user,,"{ -""status"" : 403 -}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""]}] - 
-@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) -def test_deploy_crash_non_admin(skip_if_not_non_admin, run_api, custom_lib_admin_operations): +/deploy/rest/crash/{{UUID}}/,crashing a deployed image when requested with invalid token,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_deploy_crash_invalid_token(invalid_exec_api): """""" - Crashing a Deployed Image by non-admin + crashing a deployed image for invalid token """""" - # Non-admin check of Crashing a Deployed Image created by different user - deploy_id = custom_lib_admin_operations - r = run_api.deploy_crash(deploy_id) - test_assert.status(r, 403) + deploy_id = ""invalid"" + depl_crash = invalid_exec_api.deploy_crash(deploy_id, wait=False) + depl_json = depl_crash.json() + test_assert.status(depl_crash, 401) + assert depl_json[""detail""] == ""Invalid token."" " -/deploy/rest/crash/{{UUID}}/,crashing a deployed image for valid UUID by an admin user,,"{ -""status"" : 201 -}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) -def test_deploy_crash_admin(skip_if_not_admin, run_api, custom_lib_non_admin_operations): +/deploy/rest/crash/{{UUID}}/,crashing a deployed image when Invalid UUID is provided,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 404, +""message"" : ""Machine matching query does not exist"" +}","def test_deploy_crash_invalid_UUID(run_api): """""" - Crashing a Deployed Image by Admin + crashing a deployed image for invalid UUID """""" - # Admin check of Crashing a Deployed Image created by different user + deploy_id = ""invalid"" + r = run_api.deploy_crash(deploy_id, wait=False) + test_assert.status(r, 404) + rjson = r.json() + assert ""Machine matching query does not exist"" in rjson[""error""], rjson" +/deploy/rest/crash/{{UUID}}/,crashing a deployed image for valid UUID by manager who has rights over servers,,,"endpoint = ""deploy_crash"" + +PARAMETERS_SRV_RIGHT 
= [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""], ""deploy_with"": SRV_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_crash_manager_server_right(skip_if_not_manager, run_api, custom_lib_admin_operations, custom_lib_non_admin_operations): + """""" + Crashing a Deployed Image by manager when have right on server + """""" + # When the user is not part of the group that the manager manages + deploy_id = custom_lib_admin_operations + r = run_api.deploy_crash(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server deploy_id = custom_lib_non_admin_operations r = run_api.deploy_crash(deploy_id) - test_assert.status(r, 201) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) " /deploy/rest/crash/{{UUID}}/,crashing a deployed image for valid UUID by manager who do not have rights over servers,,,"endpoint = ""deploy_crash"" @@ -692,107 +1107,103 @@ def test_deploy_crash_manager_no_server_right(skip_if_not_manager, run_api, cust r = run_api.deploy_crash(deploy_id) test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) " -/deploy/rest/crash/{{UUID}}/,crashing a deployed image for valid UUID by manager who has rights over servers,,,"endpoint = ""deploy_crash"" - -PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""], ""deploy_with"": SRV_MANAGER_RIGHTS}] - -@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -def 
test_deploy_crash_manager_server_right(skip_if_not_manager, run_api, custom_lib_admin_operations, custom_lib_non_admin_operations): +/deploy/rest/crash/{{UUID}}/,crashing a deployed image for valid UUID by an admin user,,"{ +""status"" : 201 +}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_crash_admin(skip_if_not_admin, run_api, custom_lib_non_admin_operations): """""" - Crashing a Deployed Image by manager when have right on server + Crashing a Deployed Image by Admin """""" - # When the user is not part of the group that the manager manages - deploy_id = custom_lib_admin_operations - r = run_api.deploy_crash(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) - - # When the user is part of the group that the manager manages and deployment is on manager rights to server + # Admin check of Crashing a Deployed Image created by different user deploy_id = custom_lib_non_admin_operations r = run_api.deploy_crash(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) + test_assert.status(r, 201) " -/deploy/rest/crash/{{UUID}}/,crashing a deployed image when Invalid UUID is provided,"{ -deploy_id = ""invalid"" -}","{ -""status"" : 404, -""message"" : ""Machine matching query does not exist"" -}","def test_deploy_crash_invalid_UUID(run_api): +/deploy/rest/crash/{{UUID}}/,crashing a deployed image for valid UUID by a non-admin user,,"{ +""status"" : 403 +}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""]}] + +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_crash_non_admin(skip_if_not_non_admin, run_api, custom_lib_admin_operations): """""" - crashing a deployed image for invalid UUID + Crashing a Deployed Image by non-admin """""" - deploy_id = ""invalid"" - r = run_api.deploy_crash(deploy_id, wait=False) - 
test_assert.status(r, 404) - rjson = r.json() - assert ""Machine matching query does not exist"" in rjson[""error""], rjson" -/deploy/rest/crash/{{UUID}}/,crashing a deployed image when requested with invalid token,"{ -deploy_id = ""invalid"" -}","{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_deploy_crash_invalid_token(invalid_exec_api): + # Non-admin check of Crashing a Deployed Image created by different user + deploy_id = custom_lib_admin_operations + r = run_api.deploy_crash(deploy_id) + test_assert.status(r, 403) +" +/deploy/rest/crash/{{UUID}}/,crashing a deployed image for valid UUID,,"{ +""status"" : 201 +}","def test_deploy_crash(deploy_crash): """""" - crashing a deployed image for invalid token + Crashing a Deployed Image """""" - deploy_id = ""invalid"" - depl_crash = invalid_exec_api.deploy_crash(deploy_id, wait=False) - depl_json = depl_crash.json() - test_assert.status(depl_crash, 401) - assert depl_json[""detail""] == ""Invalid token."" + x, r = deploy_crash + test_assert.status(r, 201) " -/deploy/rest/crash/{{UUID}}/,crashing a deployed image without Authorization,"{ +/deploy/rest/delete/{{UUID}}/,deleting the VM without Authorization,"{ deploy_id = ""invalid"" }","{ ""status"" : 401, ""message"" : ""Authentication credentials were not provided"" -}","def test_deploy_crash_without_authorization(anonymous_exec_api): +}","def test_deploy_delete_without_authorization(anonymous_exec_api): """""" - crashing a deployed image without authorization + Deleting the VM without authorization """""" deploy_id = ""invalid"" - depl_crash = anonymous_exec_api.deploy_crash(deploy_id, wait=False) - depl_json = depl_crash.json() - test_assert.status(depl_crash, 401) + depl_delete = anonymous_exec_api.deploy_image_delete(deploy_id, {}, wait=False) + depl_json = depl_delete.json() + test_assert.status(depl_delete, 401) assert depl_json[""detail""] == ""Authentication credentials were not provided."" " -/deploy/rest/delete/{{UUID}}/,deleting the VM 
using invalid UUID,"{ +/deploy/rest/delete/{{UUID}}/,deleting the VM when requested with invalid token,"{ deploy_id = ""invalid"" }","{ -""status"" : 404, -""message"" : ""Machine matching query does not exist"" -}","def test_deploy_delete_invalid_UUID(run_api): +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_deploy_delete_invalid_token(invalid_exec_api): """""" - Deleting the VM using invalid uuid + Deleting the VM using invalid token """""" deploy_id = ""invalid"" - r = run_api.deploy_image_delete(deploy_id, {}) - test_assert.status(r, 404) - rjson = r.json() - assert ""Machine matching query does not exist"" in rjson[""error""], rjson + depl_delete = invalid_exec_api.deploy_image_delete(deploy_id, {}, wait=False) + depl_json = depl_delete.json() + test_assert.status(depl_delete, 401) + assert depl_json[""detail""] == ""Invalid token."" " -/deploy/rest/delete/{{UUID}}/,deleting the VM using valid data,,"{ -""status"" : 201 -}","def test_deploy_delete(deploy_delete): +/deploy/rest/delete/{{UUID}}/,deleting the VM using valid data by non-admin user,,"{ +""status"" : 403 +}","PARAMETERS = [{""dest_obj"": OBJ_DEPL}] + +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_delete_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): """""" - Deleting the VM + Deleting the VM by non-Admin """""" - x, r = deploy_delete - test_assert.status(r, 201) + # Non-admin check for Deleting the Deployed VM created by different user. 
+ deploy_id = custom_lib_admin_operations + r = run_api.deploy_image_delete(deploy_id, {}) + test_assert.status(r, 403) " -/deploy/rest/delete/{{UUID}}/,deleting the VM using valid data by admin user,,"{ -""status"" : 201 -}","PARAMETERS = [{""dest_obj"": OBJ_DEPL}] +/deploy/rest/delete/{{UUID}}/,deleting the VM using valid data by manager who has server rights,,,"endpoint = ""deploy_delete"" +PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""deploy_with"": SRV_MANAGER_RIGHTS}] -@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) -def test_deploy_delete_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_delete_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): """""" - Deleting the VM by Admin + Deleting the VM by Manager """""" - # Admin check for Deleting the Deployed VM created by different user. 
+ # When the user is not part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_admin_operations + r = run_api.deploy_image_delete(deploy_id, {}) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server deploy_id = custom_lib_non_admin_operations r = run_api.deploy_image_delete(deploy_id, {}) - test_assert.status(r, 201) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) " /deploy/rest/delete/{{UUID}}/,deleting the VM using valid data by manager who does not have server rights,,,"endpoint = ""deploy_delete"" PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] @@ -813,12660 +1224,15044 @@ def test_deploy_delete_manager_no_server_right(skip_if_not_manager, custom_lib_a r = run_api.deploy_image_delete(deploy_id, {}) test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) " -/deploy/rest/delete/{{UUID}}/,deleting the VM using valid data by manager who has server rights,,,"endpoint = ""deploy_delete"" -PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""deploy_with"": SRV_MANAGER_RIGHTS}] +/deploy/rest/delete/{{UUID}}/,deleting the VM using valid data by admin user,,"{ +""status"" : 201 +}","PARAMETERS = [{""dest_obj"": OBJ_DEPL}] -@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -def test_deploy_delete_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_delete_admin(skip_if_not_admin, custom_lib_non_admin_operations, 
run_api): """""" - Deleting the VM by Manager + Deleting the VM by Admin """""" - # When the user is not part of the group that the manager manages and deployment is on manager rights to server - deploy_id = custom_lib_admin_operations - r = run_api.deploy_image_delete(deploy_id, {}) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) - - # When the user is part of the group that the manager manages and deployment is on manager rights to server + # Admin check for Deleting the Deployed VM created by different user. deploy_id = custom_lib_non_admin_operations r = run_api.deploy_image_delete(deploy_id, {}) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) + test_assert.status(r, 201) " -/deploy/rest/delete/{{UUID}}/,deleting the VM using valid data by non-admin user,,"{ -""status"" : 403 -}","PARAMETERS = [{""dest_obj"": OBJ_DEPL}] - -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) -def test_deploy_delete_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): +/deploy/rest/delete/{{UUID}}/,deleting the VM using valid data,,"{ +""status"" : 201 +}","def test_deploy_delete(deploy_delete): """""" - Deleting the VM by non-Admin + Deleting the VM """""" - # Non-admin check for Deleting the Deployed VM created by different user. 
- deploy_id = custom_lib_admin_operations - r = run_api.deploy_image_delete(deploy_id, {}) - test_assert.status(r, 403) + x, r = deploy_delete + test_assert.status(r, 201) " -/deploy/rest/delete/{{UUID}}/,deleting the VM when requested with invalid token,"{ +/deploy/rest/delete/{{UUID}}/,deleting the VM using invalid UUID,"{ deploy_id = ""invalid"" }","{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_deploy_delete_invalid_token(invalid_exec_api): +""status"" : 404, +""message"" : ""Machine matching query does not exist"" +}","def test_deploy_delete_invalid_UUID(run_api): """""" - Deleting the VM using invalid token + Deleting the VM using invalid uuid """""" deploy_id = ""invalid"" - depl_delete = invalid_exec_api.deploy_image_delete(deploy_id, {}, wait=False) - depl_json = depl_delete.json() - test_assert.status(depl_delete, 401) - assert depl_json[""detail""] == ""Invalid token."" -" -/deploy/rest/delete/{{UUID}}/,deleting the VM without Authorization,"{ -deploy_id = ""invalid"" -}","{ -""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -}","def test_deploy_delete_without_authorization(anonymous_exec_api): - """""" - Deleting the VM without authorization - """""" - deploy_id = ""invalid"" - depl_delete = anonymous_exec_api.deploy_image_delete(deploy_id, {}, wait=False) - depl_json = depl_delete.json() - test_assert.status(depl_delete, 401) - assert depl_json[""detail""] == ""Authentication credentials were not provided."" -" -/deploy/rest/deploy/{{UUID}}/,deploying a single virtual machine with default synchronous behavior(sync=false),"{ -sync = False -}","{ -""status"" : 200, -""response"" : success -}","def test_deploy_image_sync_false(run_api, library_add_new_vm): - """""" - deploy a VM image with sync as false - """""" - params, r = library_add_new_vm - lib_id = r[""UUID""] - r = run_api.deploy_image(lib_id, sync=False) - test_assert.status(r, 200) - deploy_id = r.json()[""UUID""] r = 
run_api.deploy_image_delete(deploy_id, {}) + test_assert.status(r, 404) + rjson = r.json() + assert ""Machine matching query does not exist"" in rjson[""error""], rjson " -/deploy/rest/deploy/{{UUID}}/,deploying a single VM with deploy_start set to True,"{ -lib_id, -deploy_start=True, -server_list=[server_list[0]] -}","{ -""status"" : 400, -""message"" : ""'Failed to deploy machine'"" -}","def test_deploy_image_deploy_start_true_one_vm(run_api): - """""" - deploy single VM with deploy_start as True - """""" - server_list = list(run_api.clm_my_servers.values()) - r = run_api.server_details(server_list[0]) - ram = r.json()[""total_ram""] - params, r = run_api.library_add_new_vm(ram=ram) - lib_id = r.json()[""UUID""] - r = run_api.deploy_image(lib_id, deploy_start=True, server_list=[server_list[0]]) - res = r.json() - test_assert.status(r, 400) - res[""result""] == 'FAILURE' - assert 'Failed to deploy machine' in res[""error""], res - run_api.library_delete(lib_id) -" -/deploy/rest/deploy/{{UUID}}/,deploying a virtual machine by providing group name that user isn't part of,,"{ +/deploy/rest/deploy/{{UUID}}/,provide server_list that is not under any group that the user is a part of,,"{ ""status"" : 400, -""message"" : ""You are not a part of the provided Group(s)"" +""message"" : 'You are not a part of the provided Group(s)' }"," -def test_deploy_image_group_list(run_api, library_add_new_vm): +def test_deploy_image_manager_server_list(skip_if_not_manager, run_api, library_add_new_vm): """""" - deploy a VM image with group_list, which user is not part of + deploy a VM image with provided server list, such that servers doesn't belong to any group, that user is a part of """""" params, r = library_add_new_vm lib_id = r[""UUID""] - group_list = [] - all_groups = list(run_api.clm_my_groups.keys()) - while 1: - name = f""test{random.randint(1, 1000)}"" - if name not in all_groups: - group_list.append(name) - break - - r = run_api.deploy_image(lib_id, group_list=group_list) + 
server_list = list(run_api.clm_not_my_servers.keys()) + r = run_api.deploy_image(lib_id, server_list=server_list) res = r.json() test_assert.status(r, 400) assert res[""result""] == 'FAILURE', res - assert 'You are not a part of the provided Group(s)' in res[""error""], res -" -/deploy/rest/deploy/{{UUID}}/,deploying a virtual machine where 'deploy_start = False',"{ -deploy_start=False -}","{ -""status"" : 200 -}","def test_deploy_image_deploy_start_false(run_api, library_add_new_vm): - """""" - deploy a VM image with deploy_start as false - """""" - params, r = library_add_new_vm - lib_id = r[""UUID""] - r = run_api.deploy_image(lib_id, deploy_start=False) - test_assert.status(r, 200) - deploy_id = r.json()[""UUID""] - r = run_api.deploy_image_delete(deploy_id, {}) -" -/deploy/rest/deploy/{{UUID}}/,deploying a virtual machine where 'sync = False',"{ -sync=False -}","{ -""status"" : 200 -}","def test_deploy_image_sync_false(run_api, library_add_new_vm): - """""" - deploy a VM image with sync as false - """""" - params, r = library_add_new_vm - lib_id = r[""UUID""] - r = run_api.deploy_image(lib_id, sync=False) - test_assert.status(r, 200) - deploy_id = r.json()[""UUID""] - r = run_api.deploy_image_delete(deploy_id, {}) -" -/deploy/rest/deploy/{{UUID}}/,deploying a virtual machines with a count parameter exceeding system limits,,"{ -""status"" : 400, -""message"" : ""Not enough RAM"" -}","def test_deploy_image_count_exceeding_limits(run_api, library_add_new_vm): - """""" - deploy a VM image with count exceeding the system limits - """""" - params, r = library_add_new_vm - lib_id = r[""UUID""] - server_list = list(run_api.clm_my_servers.values()) - ram_all = 0 - for server in server_list: - r = run_api.server_details(server) - ram_all += r.json()[""total_ram""] - count = math.ceil((ram_all / 200)) * random.randint(100, 1000) - r = run_api.deploy_image(lib_id, count=count) - test_assert.status(r, 400) - res = r.json() - assert res[""result""] == 'FAILURE', res - 
assert 'Not enough RAM' in res[""error""], res -" -/deploy/rest/deploy/{{UUID}}/,deploying a virtual machines with a valid vnc_password,"{ -vnc_password=""password"" -}","{ -""status"" : 200 -}","def test_deploy_image_valid_vnc_password(run_api, library_add_new_vm): - """""" - deploy a VM image using valid vnc_password - """""" - params, r = library_add_new_vm - lib_id = r[""UUID""] - r = run_api.deploy_image(lib_id, vnc_password=""password"") - test_assert.status(r, 200) - deploy_id = r.json()[""UUID""] - r = run_api.deploy_image_delete(deploy_id, {}) + assert ""Selected server(s) aren't under any group that you are a part of"" in res[""error""], res " -/deploy/rest/deploy/{{UUID}}/,deploying a virtual machines with a vnc_password set to null,"{ -vnc_password= None -}","{ -""status"" : 400, -""message"" : ""This field may not be null"" -}","def test_deploy_image_null_vnc_password(run_api, library_add_new_vm): +/deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with added filters,,,"library_count = 10 +prefix_name = ""filter_vmname_dep_list_"" + +@pytest.mark.parametrize(""lib_filter_kwargs"", [{""vm_names"": [f""{prefix_name}{rand_string()}"" for _ in range(library_count)]}], indirect=True) +def test_deploy_list_filter(run_api: apiops, lib_filter_kwargs): """""" - deploy a VM image using null vnc_password + Fetching the list of deployed images by adding filters """""" - params, r = library_add_new_vm - lib_id = r[""UUID""] - r = run_api.deploy_image(lib_id, vnc_password=None) - test_assert.status(r, 400) - res = r.json() - assert res[""result""] == 'FAILURE', res - assert 'This field may not be null' in res[""error""], res + depl_res = [] + templates, res = lib_filter_kwargs + for r in res: + rjson = r.json() + depl_r = run_api.deploy_image(rjson[""UUID""]) + depl_res.append(depl_r) + try: + filter_on_input_result(run_api, library_count, templates, depl_res, prefix_name, run_api.deploy_list) + finally: + depl_UUIDs = [depl.json()[""UUID""] for depl 
in depl_res] + run_api.deploy_bulkops({""machine_list"": depl_UUIDs, ""op"": ""delete""}) + " -/deploy/rest/deploy/{{UUID}}/,deploying a virtual machines with a vnc_password that contains special characters,"{ -vnc_password=""!@#$%"" -}","{ -""status"" : 200 -}","def test_deploy_image_special_char_vnc_password(run_api, library_add_new_vm): +/deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with added created and update DateTime Filter,,"{ +""status"" : 400 +}","def test_deploy_filter_timefilter(run_api: apiops, library_add_new_vm): """""" - deploy a VM image with vnc_password having special characters + Filter on created and update DateTime Filter """""" - params, r = library_add_new_vm + template, r = library_add_new_vm lib_id = r[""UUID""] - r = run_api.deploy_image(lib_id, vnc_password=""!@#$%"") - test_assert.status(r, 200) + r = run_api.deploy_image(lib_id) deploy_id = r.json()[""UUID""] - r = run_api.deploy_image_delete(deploy_id, {}) -" -/deploy/rest/deploy/{{UUID}}/,deploying a virtual machines with a vnc_password that contains white spaces only,"{ -vnc_password="" "" -}","{ -""status"" : 400, -""message"" : ""This field may not be blank"" -}","def test_deploy_image_whitespace_vnc_password(run_api, library_add_new_vm): - """""" - deploy a VM image with valid vnc_password having only white spaces - """""" - params, r = library_add_new_vm - lib_id = r[""UUID""] - r = run_api.deploy_image(lib_id, vnc_password="" "") - test_assert.status(r, 400) - res = r.json() - assert res[""result""] == 'FAILURE', res - assert 'This field may not be blank' in res[""error""], res -" -/deploy/rest/deploy/{{UUID}}/,deploying a virtual machines with a vnc_password that exceeds the 8 character limit,"{ -vnc_password=""+uCm7Z__YLP8kN(JwT{S]b*))Bvz:C[MRHbVkjlkhkl7GjL"" -}","{ -""status"" : 400, -""message"" : ""Ensure this field has no more than 8 characters"" -}","def test_deploy_image_too_long_vnc_password(run_api, library_add_new_vm): - """""" - deploy a VM image 
with vnc_password greater than 8 characters - """""" - params, r = library_add_new_vm - lib_id = r[""UUID""] - r = run_api.deploy_image(lib_id, vnc_password=""+uCm7Z__YLP8kN(JwT{S]b*))Bvz:C[MRHbVkjlkhkl7GjL"") - test_assert.status(r, 400) - res = r.json() - assert res[""result""] == 'FAILURE', res - assert 'Ensure this field has no more than 8 characters' in res[""error""], res -" -/deploy/rest/deploy/{{UUID}}/,deploying a virtual machines with an empty vnc_password,"{ -vnc_password="""" -}","{ -""status"" : 400, -""message"" : ""This field may not be blank"" -}"," -def test_deploy_image_empty_vnc_password(run_api, library_add_new_vm): - """""" - deploy a VM image using empty vnc_password - """""" - params, r = library_add_new_vm - lib_id = r[""UUID""] - r = run_api.deploy_image(lib_id, vnc_password="""") - test_assert.status(r, 400) - res = r.json() - assert res[""result""] == 'FAILURE', res - assert 'This field may not be blank' in res[""error""], res -" -/deploy/rest/deploy/{{UUID}}/,deploying a virtual machines with invalid deployment strategy,"{ -deployment_strategy=""invalid"" -}","{ - ""status"": 400, - ""message"": ""Not a valid choice"" -}","def test_deploy_image_invalid_deployment_strategy(run_api, library_add_new_vm): - """""" - deploy a VM image with invalid deployment strategy - """""" - params, r = library_add_new_vm + run_api.deploy_start(deploy_id) + r_details = run_api.deploy_details(deploy_id).json() + # utime and ctime is returned in the form '2023-09-14T17:59:39.173594Z' which needs conversion to '2023-09-14 17:59:39.173594' + str_utime = r_details['utime'].replace('T', ' ').replace('Z', '') + str_ctime = r_details['created_on'].replace('T', ' ').replace('Z', '') + datetime_utime = convert_datetime_stringform(r_details['utime']) + datetime_ctime = convert_datetime_stringform(r_details['created_on']) + + def handle_trigger_delay_filtering_for_created_on(created_start_date, created_end_date, created_date_range, utc=True): + """""" + Function to 
handle corner case if machine was created a day before and test get triggered on new day + """""" + if not utc: + created_start_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + created_end_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": created_start_date, ""created_end_date"": created_end_date, + ""created_date_range"": created_date_range, ""page_size"": 1}).json()['count'] == 1 + + assert datetime_ctime < datetime_utime, f""The details of the Deployment is {r_details}"" + # Filter on UTC time + # .... When the datetime is selected to be the same as in detail + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": str_ctime, + ""created_end_date"": str_ctime, ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""page_size"": 1}).json()['count'] == 0 + # ........ When the datetime is selected a 1 microsecond less than from the detail + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), + ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.deploy_list({""UUID"": deploy_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""page_size"": 1}).json()['count'] == 1 + # ........ 
When the datetime is selected a 1 microsecond less than from the detail + assert run_api.deploy_list({""UUID"": deploy_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), + ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date' and 'created_end_date' when passed blank string + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": """", ""created_end_date"": """", ""page_size"": 1}).json()['count'] == 1 + # ........Filter on 'created_start_date' and 'created_end_date' when created_start_date is greater than created_end_date + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'today + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'yesterday + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 + except AssertionError: + # when machine is created 
yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'week + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on week's last day at 23:59:59.9999999 and test get triggered on new week at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'month + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on month's last day at 23:59:59.9999999 and test get triggered on new month at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'year' + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on year last day at 23:59:59.9999999 and test get triggered on new year at 00:00:00.0000000 + 
handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # .........When the created_date_range format is invalid + response = run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""invalid"", ""page_size"": 1}) + test_assert.status(response, 400) + assert response.json()['created_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() + # .........When the created_start_date and created_end_date has white spaces in them + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": "" "" + str_ctime + "" "", ""created_end_date"": "" "" + str_ctime + "" "", ""page_size"": 1}).json()['count'] == 1 + + # Filter on list time + # .... When the datetime is selected to be the same as in detail but having TimeZone of +0530 + # ........ When the datetime is selected to be the same as in detail but with tzone list, i.e., timedelta being 19800 equivalent to +05:30 + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 + # ........ When the datetime is selected to be the same as in detail but with tzone list, i.e., timedelta being 19800 equivalent to +05:30 + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 0 + # ........ 
When the datetime is selected to be the same as in detail but with tzone list, i.e., timedelta being 19800 equivalent to +05:30 but increase microsecond by 1 to fail filter + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.deploy_list({""UUID"": deploy_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 1 + # ........ When the datetime is selected a 1 microsecond less than from the detail + assert run_api.deploy_list({""UUID"": deploy_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. 
+ # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'today + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 list and test get triggered at 00:00:00.0000000 list + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'yesterday + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 list and test get triggered at 00:00:00.0000000 list + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'week + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""week"", 
""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on week's last day at 23:59:59.9999999 list and test get triggered on new week at 00:00:00.0000000 list + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'month + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on month's last day at 23:59:59.9999999 list and test get triggered on new month at 00:00:00.0000000 list + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'year' + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on year's last day at 23:59:59.9999999 list and test get triggered on new year at 00:00:00.0000000 list + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # .........When the created_date_range format is invalid + response 
= run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""invalid"", ""page_size"": 1}) + assert response.json()['created_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() + # .........When the created_start_date and created_end_date has white spaces in them + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", ""created_end_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", + ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + + def handle_trigger_delay_filtering_for_last_op(last_op_start_date, last_op_end_date, last_op_date_range, utc=True): + """""" + Function to handle corner case if machine has last operation a day before and test get triggered on new day + """""" + if not utc: + last_op_start_date = convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30' + last_op_end_date = convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30' + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": last_op_start_date, ""last_op_end_date"": last_op_end_date, + ""last_op_date_range"": last_op_date_range, ""page_size"": 1}).json()['count'] == 1 + # Filter on UTC time + # .... 
When the datetime is selected to be the same as in detail + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": str_utime, + ""last_op_end_date"": str_utime, ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'last_op_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(microseconds=1)), + ""page_size"": 1}).json()['count'] == 0 + # ........ When the datetime is selected a 1 microsecond less than from the detail + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(microseconds=-1)), + ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'last_op_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(microseconds=1)), + ""page_size"": 1}).json()['count'] == 1 + # ........ 
When the datetime is selected a 1 microsecond less than from the detail + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(microseconds=-1)), + ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'last_op_start_date' and 'last_op_end_date' when passed blank string + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": """", ""last_op_end_date"": """", ""page_size"": 1}).json()['count'] == 1 + # ........Filter on 'last_op_start_date' and 'last_op_end_date' when last_op_start_date is greater than last_op_end_date + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(microseconds=1)), + ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(microseconds=-1)), ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'last_op_start_date', 'last_op_end_date' and 'last_op_date_range'. + # ........When the 'last_op_start_date' and 'last_op_end_date' are same as in the detail and 'last_op_date_range' is passed as'today + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": str_utime, ""last_op_end_date"": str_utime, ""last_op_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine has done last operation yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 + handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"") + # ........When the 'last_op_start_date' and 'last_op_end_date' are same as in the detail and 'last_op_date_range' is passed as'yesterday + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": str_utime, ""last_op_end_date"": str_utime, ""last_op_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 + except AssertionError: + # when machine 
has done last operation yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 + handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"") + # ........When the 'last_op_start_date' and 'last_op_end_date' are same as in the detail and 'last_op_date_range' is passed as'week + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": str_utime, ""last_op_end_date"": str_utime, ""last_op_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine has done last operation on week's last day at 23:59:59.9999999 and test get triggered on new week at 00:00:00.0000000 + handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"") + # ........When the 'last_op_start_date' and 'last_op_end_date' are same as in the detail and 'last_op_date_range' is passed as'month + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": str_utime, ""last_op_end_date"": str_utime, ""last_op_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine has done last operation on month's last day at 23:59:59.9999999 and test get triggered on new month at 00:00:00.0000000 + handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"") + # ........When the 'last_op_start_date' and 'last_op_end_date' are same as in the detail and 'last_op_date_range' is passed as'year' + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": str_utime, ""last_op_end_date"": str_utime, ""last_op_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine has done last operation on year's last day at 23:59:59.9999999 and test get triggered on new year at 00:00:00.0000000 + 
handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"") + # .........When the last_op_date_range format is invalid + response = run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": str_utime, ""last_op_end_date"": str_utime, ""last_op_date_range"": ""invalid"", ""page_size"": 1}) + test_assert.status(response, 400) + assert response.json()['last_op_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() + # .........When the last_op_start_date and last_op_end_date has white spaces in them + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": "" "" + str_utime + "" "", ""last_op_end_date"": "" "" + str_utime + "" "", ""page_size"": 1}).json()['count'] == 1 + + # Filter on list time + # .... When the datetime is selected to be the same as in detail but having TimeZone of +0530 + # ........ When the datetime is selected to be the same as in detail but with tzone list, i.e., timedelta being 19800 equivalent to +05:30 + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', + ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'last_op_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 + # ........ When the datetime is selected to be the same as in detail but with tzone list, i.e., timedelta being 19800 equivalent to +05:30 + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800, microseconds=1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 0 + # ........ 
When the datetime is selected to be the same as in detail but with tzone list, i.e., timedelta being 19800 equivalent to +05:30 but increase microsecond by 1 to fail filter + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'last_op_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800, microseconds=1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 1 + # ........ When the datetime is selected a 1 microsecond less than from the detail + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'last_op_start_date', 'last_op_end_date' and 'last_op_date_range'. 
+ # ........When the 'last_op_start_date' and 'last_op_end_date' are same as in the detail and 'last_op_date_range' is passed as'today + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', + ""last_op_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine has done last operation yesterday at 23:59:59.9999999 list and test get triggered at 00:00:00.0000000 list + handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"", utc=False) + # ........When the 'last_op_start_date' and 'last_op_end_date' are same as in the detail and 'last_op_date_range' is passed as'yesterday + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', + ""last_op_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 + except AssertionError: + # when machine has done last operation yesterday at 23:59:59.9999999 list and test get triggered at 00:00:00.0000000 list + handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"", utc=False) + # ........When the 'last_op_start_date' and 'last_op_end_date' are same as in the detail and 'last_op_date_range' is passed as'week + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', + ""last_op_date_range"": 
""week"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine has done last operation on week's last day at 23:59:59.9999999 list and test get triggered on new week at 00:00:00.0000000 list + handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"", utc=False) + # ........When the 'last_op_start_date' and 'last_op_end_date' are same as in the detail and 'last_op_date_range' is passed as'month + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', + ""last_op_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine has done last operation on month's last day at 23:59:59.9999999 list and test get triggered on new month at 00:00:00.0000000 list + handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"", utc=False) + # ........When the 'last_op_start_date' and 'last_op_end_date' are same as in the detail and 'last_op_date_range' is passed as'year' + try: + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', + ""last_op_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine has done last operation on year's last day at 23:59:59.9999999 list and test get triggered on new year at 00:00:00.0000000 list + handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"", utc=False) + # .........When the 
last_op_date_range format is invalid + response = run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', + ""last_op_date_range"": ""invalid"", ""page_size"": 1}) + test_assert.status(response, 400) + assert response.json()['last_op_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() + # .........When the last_op_start_date and last_op_end_date has white spaces in them + assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": "" "" + convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30' + "" "", ""last_op_end_date"": "" "" + convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30' + "" "", + ""last_op_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + + # ........Filter on 'created_start_date', 'created_end_date', 'last_op_start_date', 'last_op_end_date', 'created_date_range' and 'last_op_date_range' + assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""last_op_start_date"": str_utime, + ""last_op_end_date"": str_utime, ""created_date_range"": ""today"", ""last_op_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + + run_api.deploy_stop(deploy_id) + run_api.deploy_image_delete(deploy_id) +" +/deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with 'tags_list' param,"params = {""machine_list"": [machine_id], ""tags_list"": [[tag]]}",,"def test_deploy_fetch_with_tags(deploy_image, run_api): + """""" + Fetch list with tags + """""" + params, r = deploy_image + machine_id = r.json()[""UUID""] + tag = rand_string() + params = {""machine_list"": [machine_id], ""tags_list"": [[tag]]} + run_api.deploy_add_tags(params=params) + 
res = run_api.deploy_list(params={""tags"": tag}) + assert res.json()[""count""] == 1, ""The error is %s"" % res.json() +" +/deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with 'state' param,"params = {""state"": state}",,"def test_deploy_list_with_machine_state_filter(run_api): + """""" + fetch list with deploy machine state filter + """""" + state = ""stopped"" + params = {""state"": state} + rjson = run_api.deploy_list(params).json() + for machines in rjson['results']: + assert machines['state'] == state, ""Json |> %s"" % machines +" +/deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with 'scope' param set to invalid scope name,"params = {'scope': ""invalid"", 'UUID': machine_id}",,"def test_deploy_list_with_invaild_scope_name(run_api, deploy_image): + """""" + fetch list with invalid scope name + """""" + p, r = deploy_image + machine_id = r.json()['UUID'] + params = {'scope': ""invalid"", 'UUID': machine_id} + rjson = run_api.deploy_list(params).json() # 'my' is default scope gets applied on invalid scope + assert rjson['count'] == 1, ""The error is %s"" % rjson + +" +/deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with 'scope' param set to 'all',"params = {'scope': ""all"", 'UUID': deploy_id}",,"@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_list_with_scope_all(skip_if_not_admin, run_api, custom_lib_non_admin_operations): + """""" + fetch list with scope all + """""" + deploy_id = custom_lib_non_admin_operations + params = {'scope': ""all"", 'UUID': deploy_id} + rjson = run_api.deploy_list(params).json() + assert rjson['count'] == 1, ""The error is %s"" % rjson +" +/deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with 'mac' param,"params = {""mac"": mac}",,"def test_deploy_list_fetch_with_mac(run_api): + """""" + Fetch deploy list with 'mac' param + """""" + mac = ""5A:54:00:12:23:34"" + params = {""mac"": mac} + rjson = 
run_api.deploy_list(params).json() + for machines in rjson['results']: + all_macs = [network['mac'] for network in machines['machine']['hw']['networks']] + assert mac in all_macs, ""Json |> %s"" % machines + +" +/deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with 'iso' param,"cdrom = [{ + ""type"": ""sata"", + ""iso"": rand_string(), + ""is_boot"": True, + ""boot_order"": 1 + } + ] +",,"def test_deploy_fetch_with_iso(run_api): + """""" + Fetch list with 'iso' + """""" + cdrom = [{ + ""type"": ""sata"", + ""iso"": rand_string(), + ""is_boot"": True, + ""boot_order"": 1 + } + ] + params, r = run_api.library_add_new_vm(cdrom=cdrom) + lib_id = r.json()[""UUID""] + response = run_api.deploy_image(lib_id) + machine_id = response.json()[""UUID""] + params = {""iso"": response.json()['machine']['hw']['cdrom'][-1]['iso']} + assert run_api.deploy_list(params).json()['count'] == 1 + run_api.deploy_image_delete(deploy_id=machine_id) + run_api.library_delete(lib_id) +" +/deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with 'hvm_type' param,"params = {""hvm_type"": kvm}",,"def test_deploy_fetch_with_hvm_type(deploy_image, run_api): + """""" + Fetch list with 'hvm_type' + """""" + params, r = deploy_image + rjson = r.json() + kvm = rjson['machine']['hw']['hvm_type'] + params = {""hvm_type"": kvm} + rjson = run_api.deploy_list(params).json() + for machines in rjson['results']: + assert machines['machine']['hw']['hvm_type'] == kvm, ""Json |> %s"" % machines +" +/deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with 'disks__UUID' param,"params = { +""disk_UUID"": valid_existing_disk +}",,"def test_deploy_list_fetch_with_disk_UUID(deploy_image, run_api): + """""" + Fetch deploy list with 'disks_UUID' param + """""" + template, r = deploy_image + rjson = r.json() + params = {""disk_UUID"": rjson['machine']['hw']['disks'][0]['UUID']} + assert run_api.deploy_list(params).json()['count'] == 1 +" +/deploy/rest/deploy/{{UUID}}/,getting the 
list of deployed image with 'arch' param,"params = {""arch"": arch}",,"def test_deploy_fetch_with_arch(deploy_image, run_api): + """""" + Fetch list with 'arch' + """""" + params, r = deploy_image + rjson = r.json() + mc_id = rjson['UUID'] + arch = rjson['machine']['hw']['arch'] + params = {""arch"": arch} + rjson = run_api.deploy_list(params).json() + all_UUID = [mc['UUID'] for mc in rjson['results']] + assert mc_id in all_UUID, ""|> Json %s"" % rjson + for machines in rjson['results']: + assert machines['machine']['hw']['arch'] == arch, ""Json |> %s"" % machines + +" +/deploy/rest/deploy/{{UUID}}/,getting the list of deployed image,,"{ +""status"" : 200, +""response"" : machine details +}","@pytest.mark.skip() +def test_deploy_list(deploy_list): + """""" + Fetching the list of deployed images + """""" + r = deploy_list + test_assert.status(r, 200) +" +/deploy/rest/deploy/{{UUID}}/,deploying zero virtual machines by setting the 'count' param to zero,"{ +count : 0 +}","{ +""status"" : 400, +""message"" : ""Ensure this value is greater than or equal to 1"" +}","def test_deploy_image_zero_count(run_api, library_add_new_vm): + """""" + deploy a VM image with zero count value + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + r = run_api.deploy_image(lib_id, count=0) + test_assert.status(r, 400) + res = r.json() + assert res[""result""] == 'FAILURE', res + assert 'Ensure this value is greater than or equal to 1' in res[""error""], res +" +/deploy/rest/deploy/{{UUID}}/,deploying VM with a 'count' parameter set to negative value,"{ +count : -2 +}","{ +""status"" : 400, +""message"" : ""Ensure this value is greater than or equal to 1"" +}"," +def test_deploy_image_negative_count(run_api, library_add_new_vm): + """""" + deploy a VM image with negative count value + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + r = run_api.deploy_image(lib_id, count=-2) + test_assert.status(r, 400) + res = r.json() + assert res[""result""] == 'FAILURE', 
res + assert 'Ensure this value is greater than or equal to 1' in res[""error""], res +" +/deploy/rest/deploy/{{UUID}}/,deploying virtual machines with a count parameter set within system limits.,"{ +count : 2 +}","{ +""status"" : 200, +""response"" : success +}","def test_deploy_image_multiple_count(run_api, library_add_new_vm): + """""" + deploy a VM image with multiple count + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + r = run_api.deploy_image(lib_id, count=2) + res = r.json() + if 'bulk_job_UUID' in res: + test_assert.status(r, 200) + for deployment in res[""deployments""]: + deploy_id = deployment[""UUID""] + r = run_api.deploy_image_delete(deploy_id, {}) + else: + test_assert.status(r, 400) + assert res[""result""] == 'FAILURE', res + assert 'Not enough RAM' in res[""error""], res + +" +/deploy/rest/deploy/{{UUID}}/,deploying multiple virtual machines with default synchronous behavior.,"{ +deploy_start=True +}","{ +""status"" : 200, +""response"" : success +}","def test_deploy_image_deploy_start_true_multiple_vm(run_api, library_add_new_vm): + """""" + deploy multiple VM's with deploy_start as True + """""" + params, r = library_add_new_vm + lib_id = r[""uuid""] + server_list = list(run_api.clm_my_servers.values()) + r = run_api.server_details(server_list[0]) + ram = r.json()[""total_ram""] + count = math.ceil(ram / 200) + 1 + r = run_api.deploy_image(lib_id, count=count, deploy_start=True, server_list=[server_list[0]]) + res = r.json() + test_assert.status(r, 400) + res[""result""] == 'FAILURE' + assert 'Not enough RAM' in res[""error""], res +" +/deploy/rest/deploy/{{UUID}}/,deploying an image without Authorization,"{ + deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_deploy_image_without_authorization(anonymous_exec_api): + deploy_id = ""invalid"" + depl_image = anonymous_exec_api.deploy_image(deploy_id, wait=False) + depl_json = depl_image.json() + 
test_assert.status(depl_image, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided."" +" +/deploy/rest/deploy/{{UUID}}/,deploying an image with valid data,,"{ +""status"" : 200, +""response"" : image deployed +}","def test_deploy_image_vm_self(deploy_image): + """""" + Deploy image + """""" + template, r = deploy_image + res = r.json() + test_assert.status(res, template, ""deploy_image"") + test_assert.status(r, 200) +" +/deploy/rest/deploy/{{UUID}}/,deploying an image with invalid token provided,"{ + deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_deploy_image_invalid_token(invalid_exec_api): + deploy_id = ""invalid"" + depl_image = invalid_exec_api.deploy_image(deploy_id, wait=False) + depl_json = depl_image.json() + test_assert.status(depl_image, 401) + assert depl_json[""detail""] == ""Invalid token."" +" +/deploy/rest/deploy/{{UUID}}/,deploying an image when invalid UUID is provided,"{ + UUID = ""zxyz"" + +}","{ +""status"" : 404, +""message"" : Machine does not exist +}","def test_deploy_image_invalid_UUID(run_api): + """""" + deploy with invalid UUID, The status code should be 404 + """""" + UUID = ""zxyz"" + ret = run_api.deploy_image(UUID, wait=False) + test_assert.status(ret, 404) + res = ret.json() + assert res[""result""] == 'FAILURE', res + assert 'does not exist' in res[""error""], res +" +/deploy/rest/deploy/{{UUID}}/,deploying an image by an admin user,,"{ +""status"" : 200, +""response"" : image deployed +}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_image_vm_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): + """""" + Deploying an Image by Admin + """""" + # Admin check of Starting a deployment created by different user + lib_id = custom_lib_non_admin_operations + r = run_api.deploy_image(lib_id) + test_assert.status(r, 200) + deploy_id = r.json()[""UUID""] + 
run_api.deploy_image_delete(deploy_id, params={}) +" +/deploy/rest/deploy/{{UUID}}/,deploying an image by a non-admin user,,"{ +""status"" : 403}","@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_image_vm_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): + """""" + Deploying an Image by Non-admin + """""" + # Non-admin check of Starting a deployment created by different user + lib_id = custom_lib_admin_operations + r = run_api.deploy_image(lib_id) + test_assert.status(r, 403) +" +/deploy/rest/deploy/{{UUID}}/,deploying an image by a manager,,,"endpoint = ""deploy_add"" + +PARAMETERS = [{""dest_obj"": OBJ_LIB}] +PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_LIB, ""deploy_with"": SRV_MANAGER_RIGHTS}] +PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_LIB, ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_image_vm_manager(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Deploying an Image by manager + """""" + # When the user is not part of the group that the manager manages + lib_id = custom_lib_admin_operations + r = run_api.deploy_image(lib_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + lib_id = custom_lib_non_admin_operations + r = run_api.deploy_image(lib_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) +" +/deploy/rest/deploy/{{UUID}}/,deploying a virtual machines with with null deployment strategy,"{ +deployment_strategy = None +}","{ +""status"" : 400, +""message"" : ""This field may not be null"" +}"," +def 
test_deploy_image_with_null_deployment_strategy(run_api, library_add_new_vm): + """""" + deploy a VM with null deployment strategy + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + r = run_api.deploy_image(lib_id, deployment_strategy=None) + test_assert.status(r, 400) + res = r.json() + assert res[""result""] == 'FAILURE', res + assert 'This field may not be null' in res[""error""], res +" +/deploy/rest/deploy/{{UUID}}/,deploying a virtual machines with invalid deployment strategy,"{ +deployment_strategy=""invalid"" +}","{ + ""status"": 400, + ""message"": ""Not a valid choice"" +}","def test_deploy_image_invalid_deployment_strategy(run_api, library_add_new_vm): + """""" + deploy a VM image with invalid deployment strategy + """""" + params, r = library_add_new_vm lib_id = r[""UUID""] r = run_api.deploy_image(lib_id, deployment_strategy=""invalid"") test_assert.status(r, 400) res = r.json() - assert res[""result""] == 'FAILURE', res - assert 'not a valid choice' in res[""error""], res + assert res[""result""] == 'FAILURE', res + assert 'not a valid choice' in res[""error""], res +" +/deploy/rest/deploy/{{UUID}}/,deploying a virtual machines with an empty vnc_password,"{ +vnc_password="""" +}","{ +""status"" : 400, +""message"" : ""This field may not be blank"" +}"," +def test_deploy_image_empty_vnc_password(run_api, library_add_new_vm): + """""" + deploy a VM image using empty vnc_password + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + r = run_api.deploy_image(lib_id, vnc_password="""") + test_assert.status(r, 400) + res = r.json() + assert res[""result""] == 'FAILURE', res + assert 'This field may not be blank' in res[""error""], res +" +/deploy/rest/deploy/{{UUID}}/,deploying a virtual machines with a vnc_password that exceeds the 8 character limit,"{ +vnc_password=""+uCm7Z__YLP8kN(JwT{S]b*))Bvz:C[MRHbVkjlkhkl7GjL"" +}","{ +""status"" : 400, +""message"" : ""Ensure this field has no more than 8 characters"" +}","def 
test_deploy_image_too_long_vnc_password(run_api, library_add_new_vm): + """""" + deploy a VM image with vnc_password greater than 8 characters + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + r = run_api.deploy_image(lib_id, vnc_password=""+uCm7Z__YLP8kN(JwT{S]b*))Bvz:C[MRHbVkjlkhkl7GjL"") + test_assert.status(r, 400) + res = r.json() + assert res[""result""] == 'FAILURE', res + assert 'Ensure this field has no more than 8 characters' in res[""error""], res +" +/deploy/rest/deploy/{{UUID}}/,deploying a virtual machines with a vnc_password that contains white spaces only,"{ +vnc_password="" "" +}","{ +""status"" : 400, +""message"" : ""This field may not be blank"" +}","def test_deploy_image_whitespace_vnc_password(run_api, library_add_new_vm): + """""" + deploy a VM image with valid vnc_password having only white spaces + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + r = run_api.deploy_image(lib_id, vnc_password="" "") + test_assert.status(r, 400) + res = r.json() + assert res[""result""] == 'FAILURE', res + assert 'This field may not be blank' in res[""error""], res +" +/deploy/rest/deploy/{{UUID}}/,deploying a virtual machines with a vnc_password that contains special characters,"{ +vnc_password=""!@#$%"" +}","{ +""status"" : 200 +}","def test_deploy_image_special_char_vnc_password(run_api, library_add_new_vm): + """""" + deploy a VM image with vnc_password having special characters + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + r = run_api.deploy_image(lib_id, vnc_password=""!@#$%"") + test_assert.status(r, 200) + deploy_id = r.json()[""UUID""] + r = run_api.deploy_image_delete(deploy_id, {}) +" +/deploy/rest/deploy/{{UUID}}/,deploying a virtual machines with a vnc_password set to null,"{ +vnc_password= None +}","{ +""status"" : 400, +""message"" : ""This field may not be null"" +}","def test_deploy_image_null_vnc_password(run_api, library_add_new_vm): + """""" + deploy a VM image using null vnc_password + 
"""""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + r = run_api.deploy_image(lib_id, vnc_password=None) + test_assert.status(r, 400) + res = r.json() + assert res[""result""] == 'FAILURE', res + assert 'This field may not be null' in res[""error""], res +" +/deploy/rest/deploy/{{UUID}}/,deploying a virtual machines with a valid vnc_password,"{ +vnc_password=""password"" +}","{ +""status"" : 200 +}","def test_deploy_image_valid_vnc_password(run_api, library_add_new_vm): + """""" + deploy a VM image using valid vnc_password + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + r = run_api.deploy_image(lib_id, vnc_password=""password"") + test_assert.status(r, 200) + deploy_id = r.json()[""UUID""] + r = run_api.deploy_image_delete(deploy_id, {}) +" +/deploy/rest/deploy/{{UUID}}/,deploying a virtual machines with a count parameter exceeding system limits,,"{ +""status"" : 400, +""message"" : ""Not enough RAM"" +}","def test_deploy_image_count_exceeding_limits(run_api, library_add_new_vm): + """""" + deploy a VM image with count exceeding the system limits + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + server_list = list(run_api.clm_my_servers.values()) + ram_all = 0 + for server in server_list: + r = run_api.server_details(server) + ram_all += r.json()[""total_ram""] + count = math.ceil((ram_all / 200)) * random.randint(100, 1000) + r = run_api.deploy_image(lib_id, count=count) + test_assert.status(r, 400) + res = r.json() + assert res[""result""] == 'FAILURE', res + assert 'Not enough RAM' in res[""error""], res +" +/deploy/rest/deploy/{{UUID}}/,deploying a virtual machine where 'sync = False',"{ +sync=False +}","{ +""status"" : 200 +}","def test_deploy_image_sync_false(run_api, library_add_new_vm): + """""" + deploy a VM image with sync as false + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + r = run_api.deploy_image(lib_id, sync=False) + test_assert.status(r, 200) + deploy_id = r.json()[""UUID""] + r = 
run_api.deploy_image_delete(deploy_id, {}) +" +/deploy/rest/deploy/{{UUID}}/,deploying a virtual machine where 'deploy_start = False',"{ +deploy_start=False +}","{ +""status"" : 200 +}","def test_deploy_image_deploy_start_false(run_api, library_add_new_vm): + """""" + deploy a VM image with deploy_start as false + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + r = run_api.deploy_image(lib_id, deploy_start=False) + test_assert.status(r, 200) + deploy_id = r.json()[""UUID""] + r = run_api.deploy_image_delete(deploy_id, {}) +" +/deploy/rest/deploy/{{UUID}}/,deploying a virtual machine by providing group name that user isn't part of,,"{ +""status"" : 400, +""message"" : ""You are not a part of the provided Group(s)"" +}"," +def test_deploy_image_group_list(run_api, library_add_new_vm): + """""" + deploy a VM image with group_list, which user is not part of + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + group_list = [] + all_groups = list(run_api.clm_my_groups.keys()) + while 1: + name = f""test{random.randint(1, 1000)}"" + if name not in all_groups: + group_list.append(name) + break + + r = run_api.deploy_image(lib_id, group_list=group_list) + res = r.json() + test_assert.status(r, 400) + assert res[""result""] == 'FAILURE', res + assert 'You are not a part of the provided Group(s)' in res[""error""], res +" +/deploy/rest/deploy/{{UUID}}/,deploying a single VM with deploy_start set to True,"{ +lib_id, +deploy_start=True, +server_list=[server_list[0]] +}","{ +""status"" : 400, +""message"" : ""'Failed to deploy machine'"" +}","def test_deploy_image_deploy_start_true_one_vm(run_api): + """""" + deploy single VM with deploy_start as True + """""" + server_list = list(run_api.clm_my_servers.values()) + r = run_api.server_details(server_list[0]) + ram = r.json()[""total_ram""] + params, r = run_api.library_add_new_vm(ram=ram) + lib_id = r.json()[""UUID""] + r = run_api.deploy_image(lib_id, deploy_start=True, 
server_list=[server_list[0]]) + res = r.json() + test_assert.status(r, 400) + res[""result""] == 'FAILURE' + assert 'Failed to deploy machine' in res[""error""], res + run_api.library_delete(lib_id) +" +/deploy/rest/deploy/{{UUID}}/,deploying a single virtual machine with default synchronous behavior(sync=false),"{ +sync = False +}","{ +""status"" : 200, +""response"" : success +}","def test_deploy_image_sync_false(run_api, library_add_new_vm): + """""" + deploy a VM image with sync as false + """""" + params, r = library_add_new_vm + lib_id = r[""UUID""] + r = run_api.deploy_image(lib_id, sync=False) + test_assert.status(r, 200) + deploy_id = r.json()[""UUID""] + r = run_api.deploy_image_delete(deploy_id, {}) +" +/deploy/rest/details/{{UUID}}/,getting deployment details of a VM without authorization,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_deploy_details_without_authorization(anonymous_exec_api): + """""" + Getting Deploy details of the VM without authorization + """""" + deploy_id = ""invalid"" + depl_details = anonymous_exec_api.deploy_details(deploy_id) + depl_json = depl_details.json() + test_assert.status(depl_details, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided."" +" +/deploy/rest/details/{{UUID}}/,getting deployment details of a VM using valid machine id,,"{ +""status"" : 200 +}","def test_deploy_details(deploy_details): + """""" + Getting Deploy details of the VM + """""" + x, r = deploy_details + test_assert.status(r, 200) +" +/deploy/rest/details/{{UUID}}/,getting deployment details of a VM using invalid token,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_deploy_details_invalid_token(invalid_exec_api): + """""" + Getting Deploy details of the VM using invalid token + """""" + deploy_id = ""invalid"" + depl_details = invalid_exec_api.deploy_details(deploy_id) + depl_json 
= depl_details.json() + test_assert.status(depl_details, 401) + assert depl_json[""detail""] == ""Invalid token."" +" +/deploy/rest/details/{{UUID}}/,getting deployment details of a VM using invalid machine id,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 404 +}","def test_deploy_details_invalid_UUID(run_api): + """""" + Getting Deploy details of the VM using invalid id + """""" + deploy_id = ""invalid"" + r = run_api.deploy_details(deploy_id) + test_assert.status(r, 404) +" +/deploy/rest/details/{{UUID}}/,getting deployment details of a VM by non-admin user,,"{ +""status"" : 403 +}","PARAMETERS = [{""dest_obj"": OBJ_DEPL}] + +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_details_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): + """""" + Details of the VM by non-Admin + """""" + # Non-admin check for fetching details of a Deployed VM created by different user. + deploy_id = custom_lib_admin_operations + r = run_api.deploy_details(deploy_id) + test_assert.status(r, 403) +" +/deploy/rest/details/{{UUID}}/,getting deployment details of a VM by manager who has rights over server,,," +PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""deploy_with"": SRV_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_details_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Details of the VM by Manager + """""" + # When the user is not part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_admin_operations + r = run_api.deploy_details(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + + # When the user is part of the group that the 
manager manages and deployment is on manager rights to server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_details(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) +" +/deploy/rest/details/{{UUID}}/,getting deployment details of a VM by manager who does not haverights over server,,,"PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_details_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): """""" + Details of the VM by Manager + """""" + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = custom_lib_admin_operations + r = run_api.deploy_details(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_details(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) +" +/deploy/rest/details/{{UUID}}/,getting deployment details of a VM by Admin user,,"{ +""status"" : 200 +}","PARAMETERS = [{""dest_obj"": OBJ_DEPL}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_details_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): + """""" + Details of the VM by Admin + """""" + # Admin check for fetching details of a Deployed VM created by different user. 
+ deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_details(deploy_id) + test_assert.status(r, 200) +" +/deploy/rest/mac_addr/((UUID}}/,getting MAC address without Authorization,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_deploy_mac_addr_without_authorization(anonymous_exec_api): + """""" + fetching the mac address of VM without authorization + """""" + deploy_id = ""invalid"" + depl_mac_addr = anonymous_exec_api.deploy_mac_addr(deploy_id) + depl_json = depl_mac_addr.json() + test_assert.status(depl_mac_addr, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided."" +" +/deploy/rest/mac_addr/((UUID}}/,getting MAC address when valid UUID is provided and machine is not connected to network.,,"{ +""status"" : 400, +""message"" : ""Mac Addr can only be fetched when machine is in running/pausing/paused state"" +}","def test_deploy_mac_addr_stopped_machine(run_api, deploy_image): + """""" + Get Mac Addr when machine is in stopped state + """""" + params, r = deploy_image + machine_id = r.json()[""UUID""] + res = run_api.deploy_mac_addr(deploy_id=machine_id) + test_assert.status(res, 400) + assert res.json()[""error""] == ""Mac Addr can only be fetched when machine is in running/pausing/paused state"" +" +/deploy/rest/mac_addr/((UUID}}/,getting MAC address when valid UUID is provided and machine is connected to network.,,"{ +""status"" : 200, +""response"" : ""MAC address of VM +}","def test_deploy_mac_addr(deploy_mac_addr): + """""" + fetching the mac address of VM + """""" + x, r = deploy_mac_addr + test_assert.status(r, 200) +" +/deploy/rest/mac_addr/((UUID}}/,getting MAC address when requested using invalid token,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_deploy_mac_addr_invalid_token(invalid_exec_api): + """""" + fetching the mac address of VM using invalid token + """""" + 
deploy_id = ""invalid"" + depl_mac_addr = invalid_exec_api.deploy_mac_addr(deploy_id) + depl_json = depl_mac_addr.json() + test_assert.status(depl_mac_addr, 401) + assert depl_json[""detail""] == ""Invalid token."" +" +/deploy/rest/mac_addr/((UUID}}/,getting MAC address when invalid UUID is provided,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 404, +""message"" : ""Machine matching query does not exist"" +}","def test_deploy_mac_addr_invalid_UUID(run_api): + """""" + fetching the mac address of VM using invalid machine_id + """""" + deploy_id = ""invalid"" + r = run_api.deploy_mac_addr(deploy_id) + test_assert.status(r, 404) + rjson = r.json() + assert ""Machine matching query does not exist"" in rjson[""error""], rjson + +" +/deploy/rest/pause/{{UUID}}/,pausing VM when valid UUID is provided and machine state is in running,,"{ +""status"" : 20, +""response"" :Machine state should be set to paused +}","def test_deploy_deploy_pause_self(deploy_pause): + """""" + Pausing the VM + """""" + x, r = deploy_pause + test_assert.status(r, 201) +" +/deploy/rest/pause/{{UUID}}/,pausing VM when valid UUID is provided and machine is in paused state,,"{ +""status"" : 400, +""message"" : ""Cannot perform pause operation on paused state of a machine"" +}","def test_deploy_deploy_pause_already_paused_vm(deploy_pause, run_api): + """""" + Pausing a vm that is already paused + """""" + x, r = deploy_pause + res = r.json() + deploy_id = res[""UUID""] + response = run_api.deploy_pause(deploy_id) + test_assert.status(response, 400) + rjson = response.json()[""error""] + assert rjson == ""Cannot perform pause operation on paused state of a machine"" +" +/deploy/rest/pause/{{UUID}}/,pausing VM using valid deploy_id by non-admin user,,"{ +""status"" : 403 +}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""]}] + +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def 
test_deploy_deploy_pause_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): + """""" + Pausing the VM by non-admin + """""" + # Non-admin check of Starting a deployment created by different user + deploy_id = custom_lib_admin_operations + r = run_api.deploy_pause(deploy_id) + test_assert.status(r, 403) +" +/deploy/rest/pause/{{UUID}}/,pausing VM using valid deploy_id by manager who has rights over server,,," +endpoint = ""deploy_pause"" + +PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""], ""deploy_with"": SRV_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_deploy_pause_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Pausing the VM by manager when have right on server + """""" + # When the user is not part of the group that the manager manages + deploy_id = custom_lib_admin_operations + r = run_api.deploy_pause(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_pause(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True))" +/deploy/rest/pause/{{UUID}}/,pausing VM using valid deploy_id by manager who do not have rights over server,,,"endpoint = ""deploy_pause"" + +PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", 
PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_deploy_pause_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Pausing the VM by manager when have no right on server + """""" + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = custom_lib_admin_operations + r = run_api.deploy_pause(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_pause(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) +" +/deploy/rest/pause/{{UUID}}/,pausing VM using valid deploy_id by admin user,,"{ +""status"" : 201 +}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_deploy_pause_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): + """""" + Pausing the VM by Admin + """""" + # Admin check of Starting a deployment created by different user + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_pause(deploy_id) + test_assert.status(r, 201) +" +/deploy/rest/pause/{{UUID}}/,pausing a running VM without Authorization,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_deploy_deploy_pause_without_authorization(anonymous_exec_api): + """""" + Pausing the VM without authorization + """""" + deploy_id = ""invalid"" + depl_pause = anonymous_exec_api.deploy_pause(deploy_id, wait=False) + depl_json = depl_pause.json() + test_assert.status(depl_pause, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided."" +" 
+/deploy/rest/pause/{{UUID}}/,pausing a running VM when requested with invalid token,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_deploy_deploy_pause_invalid_token(invalid_exec_api): + """""" + Pausing the VM using invalid tokens + """""" + deploy_id = ""invalid"" + depl_pause = invalid_exec_api.deploy_pause(deploy_id, wait=False) + depl_json = depl_pause.json() + test_assert.status(depl_pause, 401) + assert depl_json[""detail""] == ""Invalid token."" +" +/deploy/rest/pause/{{UUID}}/,pausing a running VM when Invalid UUID is provided,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 404, +""message"" : ""Machine matching query does not exist"" +}","def test_deploy_deploy_pause_invalid_UUID(run_api): + """""" + Pausing the VM with invalid deploy_id + """""" + deploy_id = ""invalid"" + r = run_api.deploy_pause(deploy_id, wait=False) + test_assert.status(r, 404) + rjson = r.json() + assert ""Machine matching query does not exist"" in rjson[""error""], rjson +" +/deploy/rest/reboot/{{UUID}}/,rebooting a VM when it is in running state using valid id,,"{ +""status"" : 201 +}","def test_deploy_deploy_reboot_self(deploy_reboot): + """""" + Rebooting the VM + """""" + r = deploy_reboot + test_assert.status(r, 201) +" +/deploy/rest/reboot/{{UUID}}/,rebooting a running VM without authorization,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_deploy_deploy_reboot_without_authorization(anonymous_exec_api): + """""" + Rebooting a VM without authorization + """""" + deploy_id = ""invalid"" + depl_reboot = anonymous_exec_api.deploy_reboot(deploy_id, wait=False) + depl_json = depl_reboot.json() + test_assert.status(depl_reboot, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided."" + +" +/deploy/rest/reboot/{{UUID}}/,rebooting a running VM when requested with invalid token,"{ +deploy_id = ""invalid"" +}","{ 
+""status"" : 401, +""message"" : ""Invalid token"" +}","def test_deploy_deploy_reboot_invalid_token(invalid_exec_api): + """""" + Rebooting a VM using invalid token + """""" + deploy_id = ""invalid"" + depl_reboot = invalid_exec_api.deploy_reboot(deploy_id, wait=False) + depl_json = depl_reboot.json() + test_assert.status(depl_reboot, 401) + assert depl_json[""detail""] == ""Invalid token."" +" +/deploy/rest/reboot/{{UUID}}/,rebooting a running VM when Invalid UUID is provided,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 404, +""message"" : ""Machine matching query does not exist"" +}","def test_deploy_deploy_reboot_invalid_UUID(run_api): + """""" + Rebooting a VM using invalid UUID + """""" + deploy_id = ""invalid"" + r = run_api.deploy_reboot(deploy_id) + test_assert.status(r, 404) + rjson = r.json() + assert ""Machine matching query does not exist"" in rjson[""error""], rjson +" +/deploy/rest/reboot/{{UUID}}/,manager rebooting a VM when it is in running state using valid id where the manager has rights over the servers,,,"endpoint = ""deploy_reboot"" + +PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""], ""deploy_with"": SRV_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_deploy_reboot_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Rebooting the VM by manager when have right on server + """""" + # When the user is not part of the group that the manager manages + deploy_id = custom_lib_admin_operations + r = run_api.deploy_reboot(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + deploy_id = 
custom_lib_non_admin_operations + r = run_api.deploy_reboot(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) + run_api.deploy_stop(deploy_id) +" +/deploy/rest/reboot/{{UUID}}/,manager rebooting a VM when it is in running state using valid id where the manager do not have rights over the servers,,,"endpoint = ""deploy_reboot"" + +PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_deploy_reboot_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Rebooting the VM by manager when have no right on server + """""" + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = custom_lib_admin_operations + r = run_api.deploy_reboot(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_reboot(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) + +" +/deploy/rest/reboot/{{UUID}}/,admin rebooting a VM when it is in running state using valid id,,"{ +""status"" : 201 +}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""]}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_deploy_reboot_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): + """""" + Rebooting the VM by Admin 
+ """""" + # Admin check of Starting a deployment created by different user + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_reboot(deploy_id) + test_assert.status(r, 201) +" +/deploy/rest/reboot/{{UUID}}/,a non-admin user rebooting a VM when it is in running state using valid id,,"{ +""status"" : 403 +}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""]}] + +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_deploy_reboot_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): + """""" + Rebooting the VM by non-admin + """""" + # Non-admin check of Starting a deployment created by different user + deploy_id = custom_lib_admin_operations + r = run_api.deploy_reboot(deploy_id) + test_assert.status(r, 403) +" +/deploy/rest/reset/{{UUID}}/,resetting a VM without authorization,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_deploy_deploy_reset_without_authorization(anonymous_exec_api): + """""" + resetting a VM without authorization + """""" + deploy_id = ""invalid"" + depl_reset = anonymous_exec_api.deploy_reset(deploy_id, wait=False) + depl_json = depl_reset.json() + test_assert.status(depl_reset, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided.""" +/deploy/rest/reset/{{UUID}}/,resetting a VM when valid UUID is provided by manager who has rights over servers,,,"endpoint = ""deploy_reset"" + +PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""], ""deploy_with"": SRV_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_deploy_reset_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, 
custom_lib_non_admin_operations, run_api): + """""" + Resetting the VM by manager when have right on server + """""" + # When the user is not part of the group that the manager manages + deploy_id = custom_lib_admin_operations + r = run_api.deploy_reset(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_reset(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) + run_api.deploy_stop(deploy_id) +" +/deploy/rest/reset/{{UUID}}/,resetting a VM when valid UUID is provided by manager who do not have rights over servers,,,"endpoint = ""deploy_reset"" + +PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_deploy_reset_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Resetting the VM by manager when have no right on server + """""" + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = custom_lib_admin_operations + r = run_api.deploy_reset(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_reset(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, 
manages_server=False)) +" +/deploy/rest/reset/{{UUID}}/,resetting a VM when valid UUID is provided by an admin user,,"{ +""status"" : 201 +}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""]}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_deploy_reset_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): + """""" + Resetting the VM by Admin + """""" + # Admin check of Starting a deployment created by different user + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_reset(deploy_id) + test_assert.status(r, 201) " -/deploy/rest/deploy/{{UUID}}/,deploying a virtual machines with with null deployment strategy,"{ -deployment_strategy = None +/deploy/rest/reset/{{UUID}}/,resetting a VM when valid UUID is provided by a non-admin user,,"{ +""status"" : 403 +}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""]}] + +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_deploy_reset_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): + """""" + Resetting the VM by non-admin + """""" + # Non-admin check of Starting a deployment created by different user + deploy_id = custom_lib_admin_operations + r = run_api.deploy_reset(deploy_id) + test_assert.status(r, 403) +" +/deploy/rest/reset/{{UUID}}/,resetting a VM when valid UUID is provided,,"{ +""status"" : 201 +}","def test_deploy_deploy_reset_self(deploy_reset): + """""" + Resetting the VM + """""" + r = deploy_reset + test_assert.status(r, 201) +" +/deploy/rest/reset/{{UUID}}/,resetting a VM when requested with invalid token,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_deploy_deploy_reset_invalid_token(invalid_exec_api): + """""" + resetting a VM for invalid token + """""" + deploy_id = ""invalid"" + depl_reset = invalid_exec_api.deploy_reset(deploy_id, 
wait=False) + depl_json = depl_reset.json() + test_assert.status(depl_reset, 401) + assert depl_json[""detail""] == ""Invalid token."" +" +/deploy/rest/reset/{{UUID}}/,resetting a VM when invalid UUID is provided,"{ +deploy_id = ""invalid"" }","{ +""status"" : 404, +""message"" : ""Machine matching query does not exist"" +}","def test_deploy_deploy_reset_invalid_UUID(run_api): + """""" + resetting a VM for invalid UUID + """""" + deploy_id = ""invalid"" + r = run_api.deploy_reset(deploy_id, wait=False) + test_assert.status(r, 404) + rjson = r.json() + assert ""Machine matching query does not exist"" in rjson[""error""], rjson +" +/deploy/rest/resume/{{UUID}}/,resuming a VM when valid UUID is provided and machine is in resumed state,,"{ ""status"" : 400, -""message"" : ""This field may not be null"" -}"," -def test_deploy_image_with_null_deployment_strategy(run_api, library_add_new_vm): +""message"" : ""Cannot perform resume operation on running state of a machine"" +}","def test_deploy_deploy_resume_already_resumed_vm(deploy_resume, run_api): """""" - deploy a VM with null deployment strategy + resuming a VM which is already resumed """""" - params, r = library_add_new_vm - lib_id = r[""UUID""] - r = run_api.deploy_image(lib_id, deployment_strategy=None) - test_assert.status(r, 400) + x, r = deploy_resume res = r.json() - assert res[""result""] == 'FAILURE', res - assert 'This field may not be null' in res[""error""], res + deploy_id = res[""UUID""] + response = run_api.deploy_resume(deploy_id) + test_assert.status(response, 400) + rjson = response.json()[""error""] + assert rjson == ""Cannot perform resume operation on running state of a machine"" " -/deploy/rest/deploy/{{UUID}}/,deploying an image by a manager,,,"endpoint = ""deploy_add"" +/deploy/rest/resume/{{UUID}}/,resuming a VM when valid UUID is provided and machine is in paused state,,"{ +""status"" : 200, +""response"" : Machine state set to running +}","def test_deploy_deploy_resume_self(deploy_resume): 
+ """""" + Resuming the VM + """""" + x, r = deploy_resume + test_assert.status(r, 201) +" +/deploy/rest/resume/{{UUID}}/,resuming a VM by an admin user when valid UUID is provided and machine is in paused state,,"{ +""status"" : 201 +}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_deploy_resume_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): + """""" + Resuming the VM by Admin + """""" + # Admin check of Resuming a deployment created by different user + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_resume(deploy_id) + test_assert.status(r, 201) +" +/deploy/rest/resume/{{UUID}}/,resuming a VM by a non-admin user when valid UUID provided and machine is in paused state,,"{ +""status"" : 403 +}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""paused""]}] -PARAMETERS = [{""dest_obj"": OBJ_LIB}] -PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_LIB, ""deploy_with"": SRV_MANAGER_RIGHTS}] -PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_LIB, ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_deploy_resume_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): + """""" + Resuming the VM by non-admin + """""" + # Non-admin check of Starting a deployment created by different user + deploy_id = custom_lib_admin_operations + r = run_api.deploy_resume(deploy_id) + test_assert.status(r, 403) +" +/deploy/rest/resume/{{UUID}}/,"resuming a VM by a manager when valid UUID provided, machine is in paused state but manager do not have rights over servers",,,"endpoint = ""deploy_resume"" + +PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""paused""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) 
+@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_deploy_resume_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Resuming the VM by manager when have no right on server + """""" + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = custom_lib_admin_operations + r = run_api.deploy_resume(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_resume(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) +" +/deploy/rest/resume/{{UUID}}/,"resuming a VM by a manager when valid UUID provided, machine is in paused state and manager has rights over servers",,," +endpoint = ""deploy_resume"" +PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""paused""], ""deploy_with"": SRV_MANAGER_RIGHTS}] @pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) @pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -def test_deploy_image_vm_manager(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): +def test_deploy_deploy_resume_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Resuming the VM by manager when have right on server + """""" + # When the user is not part of the group that the manager manages + deploy_id = custom_lib_admin_operations + r = run_api.deploy_resume(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, 
manages_user=False, manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_resume(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) +" +/deploy/rest/resume/{{UUID}}/,resuming a paused VM without Authorization,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_deploy_deploy_resume_without_authorization(anonymous_exec_api): + """""" + resuming a paused VM without authorization + """""" + deploy_id = ""invalid"" + depl_resume = anonymous_exec_api.deploy_resume(deploy_id, wait=False) + depl_json = depl_resume.json() + test_assert.status(depl_resume, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided."" +" +/deploy/rest/resume/{{UUID}}/,resuming a paused VM when requested with invalid token,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_deploy_deploy_resume_invalid_token(invalid_exec_api): + """""" + resuming a paused VM using invalid token + """""" + deploy_id = ""invalid"" + depl_resume = invalid_exec_api.deploy_resume(deploy_id, wait=False) + depl_json = depl_resume.json() + test_assert.status(depl_resume, 401) + assert depl_json[""detail""] == ""Invalid token."" +" +/deploy/rest/resume/{{UUID}}/,resuming a paused VM when Invalid UUID is provided,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 404, +""message"" : ""Machine matching query does not exist"" +}","def test_deploy_deploy_resume_invalid_UUID(run_api): + """""" + resuming a VM which is paused using invalid deploy_id + """""" + deploy_id = ""invalid"" + r = run_api.deploy_resume(deploy_id, wait=False) + test_assert.status(r, 404) + rjson = r.json() + assert ""Machine matching query does not exist"" in rjson[""error""], rjson +" 
+/deploy/rest/shutdown/{{UUID}}/,shutting down the deployment of machine when valid UUID is provided and machine is in stopped state,,"{ +""status"" : 400, +""message"" : ""Cannot perform shutdown operation on stopped state of a machine"" +}","def test_deploy_shutdown_already_stopped_vm(run_api, deploy_stop): + """""" + Shutdown the VM that is in stopped state + """""" + x, result = deploy_stop + deploy_id = x[""UUID""] + response = run_api.deploy_shutdown(deploy_id) + test_assert.status(response, 400) + rjson = response.json()[""error""] + assert rjson == ""Cannot perform shutdown operation on stopped state of a machine"" +" +/deploy/rest/shutdown/{{UUID}}/,shutting down the deployment of machine when valid UUID is provided and machine is in paused state,,"{ +""status"" : 400, +""message"" : ""Cannot perform shutdown operation on paused state of a machine"" +}","def test_deploy_shutdown_paused_vm(deploy_start, run_api): + """""" + Shutdown the VM which is in pause state + """""" + x, r = deploy_start + deploy_id = x[""UUID""] + run_api.deploy_pause(deploy_id, wait=True) + res = run_api.deploy_shutdown(deploy_id) + test_assert.status(res, 400) + assert res.json()[""error""] == ""Cannot perform shutdown operation on paused state of a machine. 
Try `STOP` instead."" +" +/deploy/rest/shutdown/{{UUID}}/,shutting down the deployment of machine when requested with invalid token,"{ + deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_deploy_shutdown_invalid_token(invalid_exec_api): + """""" + Shutdown the VM using invalid token + """""" + deploy_id = ""invalid"" + depl_shutdown = invalid_exec_api.deploy_shutdown(deploy_id, wait=False) + depl_json = depl_shutdown.json() + test_assert.status(depl_shutdown, 401) + assert depl_json[""detail""] == ""Invalid token."" +" +/deploy/rest/shutdown/{{UUID}}/,shutting down the deployment of machine when no machine exists for the deploy id,"{ + deploy_id = ""invalid"" +}","{ +""status"" : 404, +""message"" : ""Machine matching query does not exist"" +}","def test_deploy_shutdown_invalid_UUID(run_api): + """""" + Shutdown the VM using id for which machine does not exist + """""" + deploy_id = ""invalid"" + r = run_api.deploy_shutdown(deploy_id, wait=False) + test_assert.status(r, 404) + rjson = r.json() + assert ""Machine matching query does not exist"" in rjson[""error""], rjson +" +/deploy/rest/shutdown/{{UUID}}/,shutting down the deployment of machine that is in running state,,"{ +""status"" : 201, +""response"" : Machine shutdown +}","def test_deploy_shutdown_self(deploy_shutdown): """""" - Deploying an Image by manager + Shutdown the VM """""" - # When the user is not part of the group that the manager manages - lib_id = custom_lib_admin_operations - r = run_api.deploy_image(lib_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + param, r = deploy_shutdown + test_assert.status(r, 201) - # When the user is part of the group that the manager manages and deployment is on manager rights to server - lib_id = custom_lib_non_admin_operations - r = run_api.deploy_image(lib_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True, 
manages_server=True)) " -/deploy/rest/deploy/{{UUID}}/,deploying an image by a non-admin user,,"{ -""status"" : 403}","@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) -def test_deploy_image_vm_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): +/deploy/rest/shutdown/{{UUID}}/,shutting down the deployment of machine by non-admin when valid UUID is provided and machine is in running state ,,"{ +""status"" : 403 +}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""]}] + +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_shutdown_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): """""" - Deploying an Image by Non-admin + Shutdown the VM by non-admin """""" - # Non-admin check of Starting a deployment created by different user - lib_id = custom_lib_admin_operations - r = run_api.deploy_image(lib_id) + # Non-admin check of shutdown a deployment created by different user + deploy_id = custom_lib_admin_operations + r = run_api.deploy_shutdown(deploy_id) test_assert.status(r, 403) " -/deploy/rest/deploy/{{UUID}}/,deploying an image by an admin user,,"{ -""status"" : 200, -""response"" : image deployed -}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) -def test_deploy_image_vm_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): +/deploy/rest/shutdown/{{UUID}}/,"shutting down the deployment of machine by manager when valid UUID is provided ,machine is in running state but manager do not have rights over servers",,,"PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def 
test_deploy_shutdown_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): """""" - Deploying an Image by Admin + Rebooting the VM by manager when have no right on server """""" - # Admin check of Starting a deployment created by different user - lib_id = custom_lib_non_admin_operations - r = run_api.deploy_image(lib_id) - test_assert.status(r, 200) - deploy_id = r.json()[""UUID""] - run_api.deploy_image_delete(deploy_id, params={}) + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = custom_lib_admin_operations + r = run_api.deploy_shutdown(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_shutdown(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) " -/deploy/rest/deploy/{{UUID}}/,deploying an image when invalid UUID is provided,"{ - UUID = ""zxyz"" +/deploy/rest/shutdown/{{UUID}}/,"shutting down the deployment of machine by manager when valid UUID is provided ,machine is in running state and manager has rights over servers",,,"PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""], ""deploy_with"": SRV_MANAGER_RIGHTS}] -}","{ -""status"" : 404, -""message"" : Machine does not exist -}","def test_deploy_image_invalid_UUID(run_api): +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_shutdown_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): """""" - deploy with 
invalid UUID, The status code should be 404 + Shutdown the VM by manager when have right on server """""" - UUID = ""zxyz"" - ret = run_api.deploy_image(UUID, wait=False) - test_assert.status(ret, 404) - res = ret.json() - assert res[""result""] == 'FAILURE', res - assert 'does not exist' in res[""error""], res + # When the user is not part of the group that the manager manages + deploy_id = custom_lib_admin_operations + r = run_api.deploy_shutdown(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_shutdown(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) " -/deploy/rest/deploy/{{UUID}}/,deploying an image with invalid token provided,"{ - deploy_id = ""invalid"" +/deploy/rest/shutdown/{{UUID}}/,shutting down the deployment of machine without Authorization,"{ + deploy_id = ""invalid"" }","{ ""status"" : 401, -""message"" : ""Invalid token"" -}","def test_deploy_image_invalid_token(invalid_exec_api): - deploy_id = ""invalid"" - depl_image = invalid_exec_api.deploy_image(deploy_id, wait=False) - depl_json = depl_image.json() - test_assert.status(depl_image, 401) - assert depl_json[""detail""] == ""Invalid token."" -" -/deploy/rest/deploy/{{UUID}}/,deploying an image with valid data,,"{ -""status"" : 200, -""response"" : image deployed -}","def test_deploy_image_vm_self(deploy_image): +""message"" : ""Authentication credentials were not provided"" +}"," +def test_deploy_shutdown_without_authorization(anonymous_exec_api): """""" - Deploy image + Shutdown the VM without authorization """""" - template, r = deploy_image - res = r.json() - test_assert.status(res, template, ""deploy_image"") - test_assert.status(r, 200) + deploy_id = ""invalid"" + depl_shutdown = 
anonymous_exec_api.deploy_shutdown(deploy_id, wait=False) + depl_json = depl_shutdown.json() + test_assert.status(depl_shutdown, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided."" " -/deploy/rest/deploy/{{UUID}}/,deploying an image without Authorization,"{ - deploy_id = ""invalid"" +/deploy/rest/snapshot/{{UUID}}/,taking snapshot of the virtual machine without Authorization,"{ +deploy_id = ""invalid"" }","{ ""status"" : 401, ""message"" : ""Authentication credentials were not provided"" -}","def test_deploy_image_without_authorization(anonymous_exec_api): +}","def test_deploy_snapshot_without_authorization(anonymous_exec_api): + """""" + Snapshot of the machine without authorization + """""" deploy_id = ""invalid"" - depl_image = anonymous_exec_api.deploy_image(deploy_id, wait=False) - depl_json = depl_image.json() - test_assert.status(depl_image, 401) + depl_snapshot = anonymous_exec_api.deploy_snapshot(deploy_id, wait=False) + depl_json = depl_snapshot.json() + test_assert.status(depl_snapshot, 401) assert depl_json[""detail""] == ""Authentication credentials were not provided."" " -/deploy/rest/deploy/{{UUID}}/,deploying multiple virtual machines with default synchronous behavior.,"{ -deploy_start=True +/deploy/rest/snapshot/{{UUID}}/,taking snapshot of the virtual machine with description,"{ +""description"" : ""description here"" }","{ -""status"" : 200, -""response"" : success -}","def test_deploy_image_deploy_start_true_multiple_vm(run_api, library_add_new_vm): +""status"" : 201, +}","def test_deploy_snapshot_machine_with_description(run_api, deploy_image): """""" - deploy multiple VM's with deploy_start as True + Snapshot of the machine while giving description """""" - params, r = library_add_new_vm - lib_id = r[""uuid""] - server_list = list(run_api.clm_my_servers.values()) - r = run_api.server_details(server_list[0]) - ram = r.json()[""total_ram""] - count = math.ceil(ram / 200) + 1 - r = run_api.deploy_image(lib_id, 
count=count, deploy_start=True, server_list=[server_list[0]]) - res = r.json() - test_assert.status(r, 400) - res[""result""] == 'FAILURE' - assert 'Not enough RAM' in res[""error""], res + params, r = deploy_image + machine_id = r.json()[""UUID""] + description = ""Random"" + res = run_api.deploy_snapshot(deploy_id=machine_id, description=description) + snapshotted_machine_UUID = res.json()['snapshotted_machine_UUID'] + current_desp = run_api.library_details(UUID=snapshotted_machine_UUID, params={}).json()[""description""] + run_api.library_delete(snapshotted_machine_UUID, {}) + test_assert.status(res, 201) + assert current_desp == description, ""The error is %s"" % res.json() " -/deploy/rest/deploy/{{UUID}}/,deploying virtual machines with a count parameter set within system limits.,"{ -count : 2 -}","{ -""status"" : 200, -""response"" : success -}","def test_deploy_image_multiple_count(run_api, library_add_new_vm): +/deploy/rest/snapshot/{{UUID}}/,taking snapshot of the virtual machine which is part of the island,,"{ +""message"" : ""Snapshot of machine which is part of island is not allowed"" +}","def test_deploy_snapshot_machine_part_of_island(run_api, ideploy_deploy): """""" - deploy a VM image with multiple count + Snapshot of the machine that is part of the island """""" - params, r = library_add_new_vm - lib_id = r[""UUID""] - r = run_api.deploy_image(lib_id, count=2) - res = r.json() - if 'bulk_job_UUID' in res: - test_assert.status(r, 200) - for deployment in res[""deployments""]: - deploy_id = deployment[""UUID""] - r = run_api.deploy_image_delete(deploy_id, {}) - else: - test_assert.status(r, 400) - assert res[""result""] == 'FAILURE', res - assert 'Not enough RAM' in res[""error""], res - + params, r = ideploy_deploy + deploy_id = r.json()[""deploy_UUID""] + machine_id = run_api.ideploy_details(UUID=deploy_id).json()[""machines""][0][""UUID""] + res = run_api.deploy_snapshot(deploy_id=machine_id) + assert res.json()[""error""] == ""Snapshot of machine 
which is part of island is not allowed"" " -/deploy/rest/deploy/{{UUID}}/,deploying VM with a 'count' parameter set to negative value,"{ -count : -2 -}","{ +/deploy/rest/snapshot/{{UUID}}/,taking snapshot of the virtual machine when valid UUID is provided and machine is in running state,,"{ ""status"" : 400, -""message"" : ""Ensure this value is greater than or equal to 1"" -}"," -def test_deploy_image_negative_count(run_api, library_add_new_vm): +""message"" : ""Cannot perform snapshot operation on running state of a machine""","def test_deploy_snapshot_running_vm(run_api, deploy_start): """""" - deploy a VM image with negative count value + Snapshot of the machine which is in running state """""" - params, r = library_add_new_vm - lib_id = r[""UUID""] - r = run_api.deploy_image(lib_id, count=-2) - test_assert.status(r, 400) - res = r.json() - assert res[""result""] == 'FAILURE', res - assert 'Ensure this value is greater than or equal to 1' in res[""error""], res + x, result = deploy_start + deploy_id = x[""UUID""] + response = run_api.deploy_snapshot(deploy_id, wait=False) + test_assert.status(response, 400) + rjson = response.json()[""error""] + assert rjson == ""Cannot perform snapshot operation on running state of a machine"" " -/deploy/rest/deploy/{{UUID}}/,deploying zero virtual machines by setting the 'count' param to zero,"{ -count : 0 +/deploy/rest/snapshot/{{UUID}}/,taking snapshot of the virtual machine when requested using invalid token,"{ +deploy_id = ""invalid"" }","{ -""status"" : 400, -""message"" : ""Ensure this value is greater than or equal to 1"" -}","def test_deploy_image_zero_count(run_api, library_add_new_vm): +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_deploy_snapshot_invalid_token(invalid_exec_api): """""" - deploy a VM image with zero count value + Snapshot of the machine using invalid tokens """""" - params, r = library_add_new_vm - lib_id = r[""UUID""] - r = run_api.deploy_image(lib_id, count=0) - 
test_assert.status(r, 400) - res = r.json() - assert res[""result""] == 'FAILURE', res - assert 'Ensure this value is greater than or equal to 1' in res[""error""], res + deploy_id = ""invalid"" + depl_snapshot = invalid_exec_api.deploy_snapshot(deploy_id, wait=False) + depl_json = depl_snapshot.json() + test_assert.status(depl_snapshot, 401) + assert depl_json[""detail""] == ""Invalid token."" " -/deploy/rest/deploy/{{UUID}}/,getting the list of deployed image,,"{ -""status"" : 200, -""response"" : machine details -}","@pytest.mark.skip() -def test_deploy_list(deploy_list): +/deploy/rest/snapshot/{{UUID}}/,taking snapshot of the virtual machine when Invalid UUID is provided,"{ +deploy_id = ""invalid"" +}","{ +""status"" : 404, +""message"" : ""Machine matching query does not exist""","def test_deploy_snapshot_invalid_UUID(run_api): + """""" + Snapshot of the machine using an invalid machine uuid + """""" + deploy_id = ""invalid"" + r = run_api.deploy_snapshot(deploy_id, wait=False) + test_assert.status(r, 404) + rjson = r.json() + assert ""Machine matching query does not exist"" in rjson[""error""], rjson +" +/deploy/rest/snapshot/{{UUID}}/,taking snapshot of the virtual machine using a valid UUID and machine state is in stopped state,,"{ +""status"" : 201, +""response"" : Snapshot taken +}","def test_deploy_snapshot_self(deploy_snapshot): """""" - Fetching the list of deployed images + Snapshot the VM """""" - r = deploy_list - test_assert.status(r, 200) + r = deploy_snapshot + test_assert.status(r, 201)" +/deploy/rest/snapshot/{{UUID}}/,taking snapshot of the virtual machine by non-admin user using valid UUID and machine state is in stopped state,,"{ +""status"" : 403 +}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""]}] + +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_snapshot_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): + """""" + Snapshot the VM 
by non-admin + """""" + # Non-admin check of Starting a deployment created by different user + deploy_id = custom_lib_admin_operations + r = run_api.deploy_snapshot(deploy_id) + test_assert.status(r, 403) " -/deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with 'arch' param,"params = {""arch"": arch}",,"def test_deploy_fetch_with_arch(deploy_image, run_api): +/deploy/rest/snapshot/{{UUID}}/,"taking snapshot of the virtual machine by manager using valid UUID, machine state is in stopped state but the manager do not have rights over the server",,,"endpoint = ""deploy_snapshot"" + +PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_snapshot_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): """""" - Fetch list with 'arch' + Snapshot the VM by manager when have no right on server """""" - params, r = deploy_image - rjson = r.json() - mc_id = rjson['UUID'] - arch = rjson['machine']['hw']['arch'] - params = {""arch"": arch} - rjson = run_api.deploy_list(params).json() - all_UUID = [mc['UUID'] for mc in rjson['results']] - assert mc_id in all_UUID, ""|> Json %s"" % rjson - for machines in rjson['results']: - assert machines['machine']['hw']['arch'] == arch, ""Json |> %s"" % machines + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = custom_lib_admin_operations + r = run_api.deploy_snapshot(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + # When the user is part of the group that the manager manages but the deployment is not on manager rightful 
server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_snapshot(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) " -/deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with 'disks__UUID' param,"params = { -""disk_UUID"": valid_existing_disk -}",,"def test_deploy_list_fetch_with_disk_UUID(deploy_image, run_api): +/deploy/rest/snapshot/{{UUID}}/,"taking snapshot of the virtual machine by manager using valid UUID, machine state is in stopped state and the manager has rights over the servers",,,"endpoint = ""deploy_snapshot"" +PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) + +def test_deploy_snapshot_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): """""" - Fetch deploy list with 'disks_UUID' param + Snapshot the VM by manager when have right on server """""" - template, r = deploy_image - rjson = r.json() - params = {""disk_UUID"": rjson['machine']['hw']['disks'][0]['UUID']} - assert run_api.deploy_list(params).json()['count'] == 1 + # When the user is not part of the group that the manager manages + deploy_id = custom_lib_admin_operations + r = run_api.deploy_snapshot(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_snapshot(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) + run_api.deploy_stop(deploy_id) " 
-/deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with 'hvm_type' param,"params = {""hvm_type"": kvm}",,"def test_deploy_fetch_with_hvm_type(deploy_image, run_api): +/deploy/rest/snapshot/{{UUID}}/,taking snapshot of the virtual machine by Admin user using valid UUID and machine state is in stopped state,,"{ +""status"" : 201, +""response"" : Snapshot taken +}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""]}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_snapshot_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): """""" - Fetch list with 'hvm_type' + Snapshot the VM by Admin """""" - params, r = deploy_image + # Admin check of Starting a deployment created by different user + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_snapshot(deploy_id) + test_assert.status(r, 201) rjson = r.json() - kvm = rjson['machine']['hw']['hvm_type'] - params = {""hvm_type"": kvm} - rjson = run_api.deploy_list(params).json() - for machines in rjson['results']: - assert machines['machine']['hw']['hvm_type'] == kvm, ""Json |> %s"" % machines + run_api.library_delete(rjson['snapshotted_machine_UUID'], {}) " -/deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with 'iso' param,"cdrom = [{ - ""type"": ""sata"", - ""iso"": rand_string(), - ""is_boot"": True, - ""boot_order"": 1 - } - ] -",,"def test_deploy_fetch_with_iso(run_api): +/deploy/rest/start/{{UUID}}/,starting machine deployment when requested with invalid token,"{ + deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_deploy_start_invalid_token(invalid_exec_api): """""" - Fetch list with 'iso' + starting machine deployment using invalid token """""" - cdrom = [{ - ""type"": ""sata"", - ""iso"": rand_string(), - ""is_boot"": True, - ""boot_order"": 1 - } - ] - params, r = run_api.library_add_new_vm(cdrom=cdrom) - lib_id = 
r.json()[""UUID""] - response = run_api.deploy_image(lib_id) - machine_id = response.json()[""UUID""] - params = {""iso"": response.json()['machine']['hw']['cdrom'][-1]['iso']} - assert run_api.deploy_list(params).json()['count'] == 1 - run_api.deploy_image_delete(deploy_id=machine_id) - run_api.library_delete(lib_id) + deploy_id = ""invalid"" + depl_start = invalid_exec_api.deploy_start(deploy_id, wait=False) + depl_json = depl_start.json() + test_assert.status(depl_start, 401) + assert depl_json[""detail""] == ""Invalid token."" " -/deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with 'mac' param,"params = {""mac"": mac}",,"def test_deploy_list_fetch_with_mac(run_api): +/deploy/rest/start/{{UUID}}/,starting machine deployment on a machine which is in paused state,,"{ +""status"" : 400, +""message"" : 'Cannot perform start operation on paused state of a machine' +}","def test_deploy_start_paused_vm(deploy_start, run_api): """""" - Fetch deploy list with 'mac' param + starting a machine that is in paused state """""" - mac = ""5A:54:00:12:23:34"" - params = {""mac"": mac} - rjson = run_api.deploy_list(params).json() - for machines in rjson['results']: - all_macs = [network['mac'] for network in machines['machine']['hw']['networks']] - assert mac in all_macs, ""Json |> %s"" % machines - + x, r = deploy_start + deploy_id = x[""UUID""] + run_api.deploy_pause(deploy_id, wait=True) + res = run_api.deploy_start(deploy_id) + test_assert.status(res, 400) + assert res.json()[""error""] == 'Cannot perform start operation on paused state of a machine' " -/deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with 'scope' param set to 'all',"params = {'scope': ""all"", 'UUID': deploy_id}",,"@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) -def test_deploy_list_with_scope_all(skip_if_not_admin, run_api, custom_lib_non_admin_operations): +/deploy/rest/start/{{UUID}}/,starting deployment of machine without 
Authorization,"{ + deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_deploy_start_without_authorization(anonymous_exec_api): """""" - fetch list with scope all + starting machine deployment without authorization """""" - deploy_id = custom_lib_non_admin_operations - params = {'scope': ""all"", 'UUID': deploy_id} - rjson = run_api.deploy_list(params).json() - assert rjson['count'] == 1, ""The error is %s"" % rjson + deploy_id = ""invalid"" + depl_start = anonymous_exec_api.deploy_start(deploy_id, wait=False) + depl_json = depl_start.json() + test_assert.status(depl_start, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided."" " -/deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with 'scope' param set to invalid scope name,"params = {'scope': ""invalid"", 'UUID': machine_id}",,"def test_deploy_list_with_invaild_scope_name(run_api, deploy_image): +/deploy/rest/start/{{UUID}}/,starting deployment of machine using Valid UUID which is already in a running state,,"{ +""status"" : 400, +""message"" : ""Cannot perform start operation on running state of a machine"" +}","def test_deploy_start_already_running_vm(deploy_start, run_api): """""" - fetch list with invalid scope name + starting a machine that is already running """""" - p, r = deploy_image - machine_id = r.json()['UUID'] - params = {'scope': ""invalid"", 'UUID': machine_id} - rjson = run_api.deploy_list(params).json() # 'my' is default scope gets applied on invalid scope - assert rjson['count'] == 1, ""The error is %s"" % rjson - + x, r = deploy_start + deploy_id = x[""UUID""] + r = run_api.deploy_start(deploy_id, wait=False) + test_assert.status(r, 400) + rjson = r.json()[""error""] + assert rjson == ""Cannot perform start operation on running state of a machine"" " -/deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with 'state' param,"params = {""state"": state}",,"def 
test_deploy_list_with_machine_state_filter(run_api): +/deploy/rest/start/{{UUID}}/,starting deployment of machine using invalid id for which no machine exists,"{ + deploy_id = ""invalid"" +}","{ +""status"" : 404, +""message"" : ""Machine matching query does not exist"" +}","def test_deploy_start_invalid_UUID(run_api): """""" - fetch list with deploy machine state filter + starting a nonexisting machine using UUID """""" - state = ""stopped"" - params = {""state"": state} - rjson = run_api.deploy_list(params).json() - for machines in rjson['results']: - assert machines['state'] == state, ""Json |> %s"" % machines + deploy_id = ""invalid"" + r = run_api.deploy_start(deploy_id, wait=False) + test_assert.status(r, 404) + rjson = r.json() + assert ""Machine matching query does not exist"" in rjson[""error""], rjson " -/deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with 'tags_list' param,"params = {""machine_list"": [machine_id], ""tags_list"": [[tag]]}",,"def test_deploy_fetch_with_tags(deploy_image, run_api): +/deploy/rest/start/{{UUID}}/,"starting deployment of machine by manager with a valid UUID and machine is in stopped state , where manager has rights over servers",,," +endpoint = ""deploy_start"" + +PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""]}] +PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_MANAGER_RIGHTS}] +PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_start_vm_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): """""" - Fetch list with tags + Deploying a Image and Starting the VM and 
then Stopping """""" - params, r = deploy_image - machine_id = r.json()[""UUID""] - tag = rand_string() - params = {""machine_list"": [machine_id], ""tags_list"": [[tag]]} - run_api.deploy_add_tags(params=params) - res = run_api.deploy_list(params={""tags"": tag}) - assert res.json()[""count""] == 1, ""The error is %s"" % res.json() + # When the user is not part of the group that the manager manages + deploy_id = custom_lib_admin_operations + r = run_api.deploy_start(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_start(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) + run_api.deploy_stop(deploy_id) " -/deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with added created and update DateTime Filter,,"{ -""status"" : 400 -}","def test_deploy_filter_timefilter(run_api: apiops, library_add_new_vm): +/deploy/rest/start/{{UUID}}/,"starting deployment of machine by manager with a valid UUID and machine is in stopped state , where manager does not have rights over servers",,," +endpoint = ""deploy_start"" + +PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""]}] +PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_MANAGER_RIGHTS}] +PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] + + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_start_vm_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, 
custom_lib_non_admin_operations, run_api): """""" - Filter on created and update DateTime Filter + Deploying a Image and Starting the VM and then Stopping """""" - template, r = library_add_new_vm - lib_id = r[""UUID""] - r = run_api.deploy_image(lib_id) - deploy_id = r.json()[""UUID""] - run_api.deploy_start(deploy_id) - r_details = run_api.deploy_details(deploy_id).json() - # utime and ctime is returned in the form '2023-09-14T17:59:39.173594Z' which needs conversion to '2023-09-14 17:59:39.173594' - str_utime = r_details['utime'].replace('T', ' ').replace('Z', '') - str_ctime = r_details['created_on'].replace('T', ' ').replace('Z', '') - datetime_utime = convert_datetime_stringform(r_details['utime']) - datetime_ctime = convert_datetime_stringform(r_details['created_on']) - - def handle_trigger_delay_filtering_for_created_on(created_start_date, created_end_date, created_date_range, utc=True): - """""" - Function to handle corner case if machine was created a day before and test get triggered on new day - """""" - if not utc: - created_start_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' - created_end_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' - assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": created_start_date, ""created_end_date"": created_end_date, - ""created_date_range"": created_date_range, ""page_size"": 1}).json()['count'] == 1 + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = custom_lib_admin_operations + r = run_api.deploy_start(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + run_api.deploy_stop(deploy_id) - assert datetime_ctime < datetime_utime, f""The details of the Deployment is {r_details}"" - # Filter on UTC time - # .... 
When the datetime is selected to be the same as in detail - assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": str_ctime, - ""created_end_date"": str_ctime, ""page_size"": 1}).json()['count'] == 1 - # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased - # ........ When the datetime is selected a 1 microsecond more than from the detail - assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), - ""page_size"": 1}).json()['count'] == 0 - # ........ When the datetime is selected a 1 microsecond less than from the detail - assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), - ""page_size"": 1}).json()['count'] == 1 - # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased - # ........ When the datetime is selected a 1 microsecond more than from the detail - assert run_api.deploy_list({""UUID"": deploy_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), - ""page_size"": 1}).json()['count'] == 1 - # ........ 
When the datetime is selected a 1 microsecond less than from the detail - assert run_api.deploy_list({""UUID"": deploy_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), - ""page_size"": 1}).json()['count'] == 0 - # ........Filter on 'created_start_date' and 'created_end_date' when passed blank string - assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": """", ""created_end_date"": """", ""page_size"": 1}).json()['count'] == 1 - # ........Filter on 'created_start_date' and 'created_end_date' when created_start_date is greater than created_end_date - assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), - ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), ""page_size"": 1}).json()['count'] == 0 - # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'today - try: - assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine is created yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'yesterday - try: - assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 - except AssertionError: - # when machine is created 
yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'week - try: - assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine is created on week's last day at 23:59:59.9999999 and test get triggered on new week at 00:00:00.0000000 - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'month - try: - assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine is created on month's last day at 23:59:59.9999999 and test get triggered on new month at 00:00:00.0000000 - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'year' - try: - assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine is created on year last day at 23:59:59.9999999 and test get triggered on new year at 00:00:00.0000000 - 
handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") - # .........When the created_date_range format is invalid - response = run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""invalid"", ""page_size"": 1}) - test_assert.status(response, 400) - assert response.json()['created_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() - # .........When the created_start_date and created_end_date has white spaces in them - assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": "" "" + str_ctime + "" "", ""created_end_date"": "" "" + str_ctime + "" "", ""page_size"": 1}).json()['count'] == 1 + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_start(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) + run_api.deploy_stop(deploy_id) +" +/deploy/rest/start/{{UUID}}/,starting deployment of machine by admin with a valid UUID and machine is in stopped state,,"{ +""status"" : 201, +""response"" : Machine should got to running state +}"," +endpoint = ""deploy_start"" - # Filter on list time - # .... When the datetime is selected to be the same as in detail but having TimeZone of +0530 - # ........ 
When the datetime is selected to be the same as in detail but with tzone list, i.e., timedelta being 19800 equivalent to +05:30 - assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""page_size"": 1}).json()['count'] == 1 - # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 - # ........ When the datetime is selected to be the same as in detail but with tzone list, i.e., timedelta being 19800 equivalent to +05:30 - assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', - ""page_size"": 1}).json()['count'] == 0 - # ........ When the datetime is selected to be the same as in detail but with tzone list, i.e., timedelta being 19800 equivalent to +05:30 but increase microsecond by 1 to fail filter - assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', - ""page_size"": 1}).json()['count'] == 1 - # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 - # ........ When the datetime is selected a 1 microsecond more than from the detail - assert run_api.deploy_list({""UUID"": deploy_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', - ""page_size"": 1}).json()['count'] == 1 - # ........ 
When the datetime is selected a 1 microsecond less than from the detail - assert run_api.deploy_list({""UUID"": deploy_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', - ""page_size"": 1}).json()['count'] == 0 - # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'today - try: - assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine is created yesterday at 23:59:59.9999999 list and test get triggered at 00:00:00.0000000 list - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'yesterday - try: - assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 - except AssertionError: - # when machine is created yesterday at 23:59:59.9999999 list and test get triggered at 00:00:00.0000000 list - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) - # ........When the 'created_start_date' and 'created_end_date' are same as 
in the detail and 'created_date_range' is passed as'week - try: - assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine is created on week's last day at 23:59:59.9999999 list and test get triggered on new week at 00:00:00.0000000 list - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'month - try: - assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine is created on month's last day at 23:59:59.9999999 list and test get triggered on new month at 00:00:00.0000000 list - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'year' - try: - assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 - except 
AssertionError: - # when machine is created on year's last day at 23:59:59.9999999 list and test get triggered on new year at 00:00:00.0000000 list - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) - # .........When the created_date_range format is invalid - response = run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""invalid"", ""page_size"": 1}) - assert response.json()['created_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() - # .........When the created_start_date and created_end_date has white spaces in them - assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", ""created_end_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", - ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 +PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""]}] +PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_MANAGER_RIGHTS}] +PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] - def handle_trigger_delay_filtering_for_last_op(last_op_start_date, last_op_end_date, last_op_date_range, utc=True): - """""" - Function to handle corner case if machine has last operation a day before and test get triggered on new day - """""" - if not utc: - last_op_start_date = convert_datetime_stringform(datetime_utime + 
timedelta(seconds=19800)) + '+05:30' - last_op_end_date = convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30' - assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": last_op_start_date, ""last_op_end_date"": last_op_end_date, - ""last_op_date_range"": last_op_date_range, ""page_size"": 1}).json()['count'] == 1 - # Filter on UTC time - # .... When the datetime is selected to be the same as in detail - assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": str_utime, - ""last_op_end_date"": str_utime, ""page_size"": 1}).json()['count'] == 1 - # ......> Filter on 'last_op_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased - # ........ When the datetime is selected a 1 microsecond more than from the detail - assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(microseconds=1)), - ""page_size"": 1}).json()['count'] == 0 - # ........ When the datetime is selected a 1 microsecond less than from the detail - assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(microseconds=-1)), - ""page_size"": 1}).json()['count'] == 1 - # ......> Filter on 'last_op_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased - # ........ When the datetime is selected a 1 microsecond more than from the detail - assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(microseconds=1)), - ""page_size"": 1}).json()['count'] == 1 - # ........ 
When the datetime is selected a 1 microsecond less than from the detail - assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(microseconds=-1)), - ""page_size"": 1}).json()['count'] == 0 - # ........Filter on 'last_op_start_date' and 'last_op_end_date' when passed blank string - assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": """", ""last_op_end_date"": """", ""page_size"": 1}).json()['count'] == 1 - # ........Filter on 'last_op_start_date' and 'last_op_end_date' when last_op_start_date is greater than last_op_end_date - assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(microseconds=1)), - ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(microseconds=-1)), ""page_size"": 1}).json()['count'] == 0 - # ........Filter on 'last_op_start_date', 'last_op_end_date' and 'last_op_date_range'. - # ........When the 'last_op_start_date' and 'last_op_end_date' are same as in the detail and 'last_op_date_range' is passed as'today - try: - assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": str_utime, ""last_op_end_date"": str_utime, ""last_op_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine has done last operation yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 - handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"") - # ........When the 'last_op_start_date' and 'last_op_end_date' are same as in the detail and 'last_op_date_range' is passed as'yesterday - try: - assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": str_utime, ""last_op_end_date"": str_utime, ""last_op_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 - except AssertionError: - # when machine 
has done last operation yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 - handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"") - # ........When the 'last_op_start_date' and 'last_op_end_date' are same as in the detail and 'last_op_date_range' is passed as'week - try: - assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": str_utime, ""last_op_end_date"": str_utime, ""last_op_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine has done last operation on week's last day at 23:59:59.9999999 and test get triggered on new week at 00:00:00.0000000 - handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"") - # ........When the 'last_op_start_date' and 'last_op_end_date' are same as in the detail and 'last_op_date_range' is passed as'month - try: - assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": str_utime, ""last_op_end_date"": str_utime, ""last_op_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine has done last operation on month's last day at 23:59:59.9999999 and test get triggered on new month at 00:00:00.0000000 - handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"") - # ........When the 'last_op_start_date' and 'last_op_end_date' are same as in the detail and 'last_op_date_range' is passed as'year' - try: - assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": str_utime, ""last_op_end_date"": str_utime, ""last_op_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine has done last operation on year's last day at 23:59:59.9999999 and test get triggered on new year at 00:00:00.0000000 - 
handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"") - # .........When the last_op_date_range format is invalid - response = run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": str_utime, ""last_op_end_date"": str_utime, ""last_op_date_range"": ""invalid"", ""page_size"": 1}) +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_start_vm_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): + """""" + Deploying a Image and Starting the VM and then Stopping by Admin + """""" + # Admin check of Starting a deployment created by different user + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_start(deploy_id) + test_assert.status(r, 201) + run_api.deploy_stop(deploy_id)" +/deploy/rest/stop/{{UUID}}/,stopping deployment of a machine without authorization,"{ + deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_deploy_stop_without_authorization(anonymous_exec_api): + """""" + stopping machine deployment without authorization + """""" + deploy_id = ""invalid"" + depl_stop = anonymous_exec_api.deploy_stop(deploy_id, wait=False) + depl_json = depl_stop.json() + test_assert.status(depl_stop, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided."" +" +/deploy/rest/stop/{{UUID}}/,stopping deployment of a machine when valid UUID is provided and machine is in stopped state,,"{ +""status"" : 400, +""message"" : ""Cannot perform power off operation on stopped state of a machine"" +}","def test_deploy_stop_already_stopped_vm(run_api, deploy_stop): + """""" + stopping machine deployment when machine in stopped state + """""" + x, result = deploy_stop + deploy_id = x[""UUID""] + response = run_api.deploy_stop(deploy_id) test_assert.status(response, 400) - assert 
response.json()['last_op_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() - # .........When the last_op_start_date and last_op_end_date has white spaces in them - assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": "" "" + str_utime + "" "", ""last_op_end_date"": "" "" + str_utime + "" "", ""page_size"": 1}).json()['count'] == 1 + rjson = response.json()[""error""] + assert rjson == ""Cannot perform power off operation on stopped state of a machine"" +" +/deploy/rest/stop/{{UUID}}/,stopping deployment of a machine when valid UUID is provided and machine is in running state,,"{ +""status"" : 201 +}","def test_deploy_stop_self(deploy_stop): + """""" + stopping machine deployment + """""" + x, r = deploy_stop + test_assert.status(r, 201)" +/deploy/rest/stop/{{UUID}}/,stopping deployment of a machine when requested with invalid token,"{ + deploy_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_deploy_stop_invalid_token(invalid_exec_api): + """""" + stopping machine deployment with invalid token + """""" + deploy_id = ""invalid"" + depl_stop = invalid_exec_api.deploy_stop(deploy_id, wait=False) + depl_json = depl_stop.json() + test_assert.status(depl_stop, 401) + assert depl_json[""detail""] == ""Invalid token."" +" +/deploy/rest/stop/{{UUID}}/,stopping deployment of a machine when Invalid UUID is provided,"{ + deploy_id = ""invalid"" +}","{ +""status"" : 404, +""message"" : ""Machine matching query does not exist"" +}","def test_deploy_stop_invalid_UUID(run_api): + """""" + stopping machine deployment using a machine id for which machine does not exist + """""" - # Filter on list time - # .... When the datetime is selected to be the same as in detail but having TimeZone of +0530 - # ........ 
When the datetime is selected to be the same as in detail but with tzone list, i.e., timedelta being 19800 equivalent to +05:30 - assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', - ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', ""page_size"": 1}).json()['count'] == 1 - # ......> Filter on 'last_op_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 - # ........ When the datetime is selected to be the same as in detail but with tzone list, i.e., timedelta being 19800 equivalent to +05:30 - assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800, microseconds=1)) + '+05:30', - ""page_size"": 1}).json()['count'] == 0 - # ........ When the datetime is selected to be the same as in detail but with tzone list, i.e., timedelta being 19800 equivalent to +05:30 but increase microsecond by 1 to fail filter - assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', - ""page_size"": 1}).json()['count'] == 1 - # ......> Filter on 'last_op_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 - # ........ When the datetime is selected a 1 microsecond more than from the detail - assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800, microseconds=1)) + '+05:30', - ""page_size"": 1}).json()['count'] == 1 - # ........ 
When the datetime is selected a 1 microsecond less than from the detail - assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', - ""page_size"": 1}).json()['count'] == 0 - # ........Filter on 'last_op_start_date', 'last_op_end_date' and 'last_op_date_range'. - # ........When the 'last_op_start_date' and 'last_op_end_date' are same as in the detail and 'last_op_date_range' is passed as'today - try: - assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', - ""last_op_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine has done last operation yesterday at 23:59:59.9999999 list and test get triggered at 00:00:00.0000000 list - handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"", utc=False) - # ........When the 'last_op_start_date' and 'last_op_end_date' are same as in the detail and 'last_op_date_range' is passed as'yesterday - try: - assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', - ""last_op_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 - except AssertionError: - # when machine has done last operation yesterday at 23:59:59.9999999 list and test get triggered at 00:00:00.0000000 list - handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"", utc=False) - # ........When the 'last_op_start_date' and 
'last_op_end_date' are same as in the detail and 'last_op_date_range' is passed as'week - try: - assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', - ""last_op_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine has done last operation on week's last day at 23:59:59.9999999 list and test get triggered on new week at 00:00:00.0000000 list - handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"", utc=False) - # ........When the 'last_op_start_date' and 'last_op_end_date' are same as in the detail and 'last_op_date_range' is passed as'month - try: - assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', - ""last_op_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine has done last operation on month's last day at 23:59:59.9999999 list and test get triggered on new month at 00:00:00.0000000 list - handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"", utc=False) - # ........When the 'last_op_start_date' and 'last_op_end_date' are same as in the detail and 'last_op_date_range' is passed as'year' - try: - assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', - ""last_op_date_range"": ""year"", 
""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine has done last operation on year's last day at 23:59:59.9999999 list and test get triggered on new year at 00:00:00.0000000 list - handle_trigger_delay_filtering_for_last_op(last_op_start_date=str_utime, last_op_end_date=str_utime, last_op_date_range=""yesterday"", utc=False) - # .........When the last_op_date_range format is invalid - response = run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', ""last_op_end_date"": convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30', - ""last_op_date_range"": ""invalid"", ""page_size"": 1}) - test_assert.status(response, 400) - assert response.json()['last_op_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() - # .........When the last_op_start_date and last_op_end_date has white spaces in them - assert run_api.deploy_list({""UUID"": deploy_id, ""last_op_start_date"": "" "" + convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30' + "" "", ""last_op_end_date"": "" "" + convert_datetime_stringform(datetime_utime + timedelta(seconds=19800)) + '+05:30' + "" "", - ""last_op_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + deploy_id = ""invalid"" + r = run_api.deploy_stop(deploy_id, wait=False) + test_assert.status(r, 404) + rjson = r.json() + assert ""Machine matching query does not exist"" in rjson[""error""], rjson - # ........Filter on 'created_start_date', 'created_end_date', 'last_op_start_date', 'last_op_end_date', 'created_date_range' and 'last_op_date_range' - assert run_api.deploy_list({""UUID"": deploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""last_op_start_date"": str_utime, - ""last_op_end_date"": str_utime, ""created_date_range"": ""today"", ""last_op_date_range"": 
""today"", ""page_size"": 1}).json()['count'] == 1 +" +/group/rest/add-server/{id}/,"addition of server when server id is invalid. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ +server_group_list = { + ""servers_list"": [""0""] + } +}","{ +status : 400/404 +}","@pytest.mark.skip(reason=""Skipping this test because of it is returning 207 in place 400/404"") +@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) +def test_group_server_add_with_invalid_server_id(run_api, custom_group_admin_operations): + """""" + Add Server in Group with invalid server id + """""" + server_group_list = { + ""servers_list"": [""0""] + } + params, r = custom_group_admin_operations + res = r.json() + group_id = res['id'] - run_api.deploy_stop(deploy_id) - run_api.deploy_image_delete(deploy_id) + r = run_api.group_add_server(server_group_list, group_id) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + status_code = r.status_code + assert status_code == 400 or status_code == 404 +" +/group/rest/add-server/{id}/,"addition of server when both group and server id are valid. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+",,"{ +status : 403 / 202 +}","PARAMETERS = [{""action"": GROUP_ADD_SERVER}] + +@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) +def test_group_server_add(run_api, custom_group_admin_operations): + """""" + Add Server in Group + """""" + param, ret = custom_group_admin_operations + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(ret, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(ret, 202) + +" +/group/rest/add-server/{id}/,"addition of server when both group and server id are invalid. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","server_group_list = { + ""servers_list"": [""0""] + }","{ +""status"" : 400 +""message"" : ""Group does not exist"" +}","def test_group_server_add_invalid_server_id_and_grp_id(run_api): + """""" + Add Server in Group with invalid server id and invalid group id + """""" + server_group_list = { + ""servers_list"": [""0""] + } + r = run_api.group_add_server(server_group_list, group_id=0) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + res = r.json() + assert res['error'] == ""Group does not exist"" + test_assert.status(r, 400) + +" +/group/rest/add-server/{id}/,addition of server to group without Authorization ,"server_group_list = { + ""servers_list"": [""0""] + }","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +} +","def test_group_server_add_without_authorization(anonymous_exec_api): + """""" + Add Server in Group without Authorization + """""" + + server_group_list = { + ""servers_list"": [""0""] + } + r = anonymous_exec_api.group_add_server(server_group_list, group_id=0) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Authentication credentials were not provided."" +" 
+/group/rest/add-server/{id}/,addition of server to group when requested with invalid token,"server_group_list = { ""servers_list"": [""0""] }","{ +""status"" : 401, +""message"" : ""Invalid token"" +} +"," +def test_group_server_add_with_invalid_token(invalid_exec_api): + """""" + Add Server in Group with invalid token + """""" + server_group_list = { + ""servers_list"": ['0'] + } + r = invalid_exec_api.group_add_server(server_group_list, group_id=0) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Invalid token."" +" +/group/rest/add-server/{id}/,"addition of server to group when invalid group id is provided. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ +group_id=0, +}","{ + ""status"": 400, + ""message"": ""Group does not exist"" +}","def test_group_server_add_with_invalid_id(run_api, server_list): + """""" + Add Server in Group with invalid group id + """""" + r = server_list + res = r.json() + list_server = [result['UUID'] for result in res['results']] + server_group_list = { + ""servers_list"": list_server + } + r = run_api.group_add_server(server_group_list, group_id=0) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + res = r.json() + assert res['error'] == ""Group does not exist"" + test_assert.status(r, 400) +" +/group/rest/add-user/{id}/,requesting of user addition to group with invalid token,"{group_id=0, +user_ids=[0] +}","{""status"":401, +""message"": ""Invalid Token"" +}","def test_group_add_user_with_invalid_token(invalid_exec_api): + """""" + Adding user id into group with invalid token + """""" + template, r = invalid_exec_api.group_add_user(group_id=0, user_ids=[0]) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == 'Invalid token.' 
" -/deploy/rest/deploy/{{UUID}}/,getting the list of deployed image with added filters,,,"library_count = 10 -prefix_name = ""filter_vmname_dep_list_"" - -@pytest.mark.parametrize(""lib_filter_kwargs"", [{""vm_names"": [f""{prefix_name}{rand_string()}"" for _ in range(library_count)]}], indirect=True) -def test_deploy_list_filter(run_api: apiops, lib_filter_kwargs): +/group/rest/add-user/{id}/,providing valid Group Id and User Id,"{ +""search"" :""vivekt"" +}","{ +""status"" : 202, +""response"" : Accepted","@pytest.mark.parametrize(""custom_group_admin_operations"", [{""action"": GROUP_ADD_MANAGER_AS_MANAGER}], indirect=True) +def test_group_add_user_by_manager(skip_if_not_manager, custom_group_admin_operations, run_api): """""" - Fetching the list of deployed images by adding filters + When provided user_id and group_id """""" - depl_res = [] - templates, res = lib_filter_kwargs - for r in res: - rjson = r.json() - depl_r = run_api.deploy_image(rjson[""UUID""]) - depl_res.append(depl_r) - try: - filter_on_input_result(run_api, library_count, templates, depl_res, prefix_name, run_api.deploy_list) - finally: - depl_UUIDs = [depl.json()[""UUID""] for depl in depl_res] - run_api.deploy_bulkops({""machine_list"": depl_UUIDs, ""op"": ""delete""}) + template, r = custom_group_admin_operations + group_id = template['group_id'] + user_r = run_api.user_list({'search': 'vivekt'}) + user_id = user_r.json()['results'][0]['id'] + params, result = run_api.group_add_user(group_id, user_ids=[user_id]) + test_assert.status(result, 202) +" +/group/rest/add-user/{id}/,"providing invalid User Id. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{""user_id"" : 0}","{""status"" : 207 }","PARAMETERS = [{""action"": GROUP_ADD_USER}] +@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) +def test_group_add_user_with_invalid_user_id(run_api, custom_group_admin_operations): + """""" + Adding invalid user id into group + """""" + params, r = custom_group_admin_operations + res = r.json() + group_id = res['id'] + template, r = run_api.group_add_user(group_id, user_ids=[0]) + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 207) " -/deploy/rest/deploy/{{UUID}}/,provide server_list that is not under any group that the user is a part of,,"{ -""status"" : 400, -""message"" : 'You are not a part of the provided Group(s)' +/group/rest/add-user/{id}/,"providing invalid Group Id. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{""group_id"" : 0}","{""status"":400, +""message"": ""'Group does not exist'"" }"," -def test_deploy_image_manager_server_list(skip_if_not_manager, run_api, library_add_new_vm): +def test_group_add_user_with_invalid_group_id(run_api, admin_exec_api): """""" - deploy a VM image with provided server list, such that servers doesn't belong to any group, that user is a part of + Adding users into invalid group id """""" - params, r = library_add_new_vm - lib_id = r[""UUID""] - server_list = list(run_api.clm_not_my_servers.keys()) - r = run_api.deploy_image(lib_id, server_list=server_list) - res = r.json() - test_assert.status(r, 400) - assert res[""result""] == 'FAILURE', res - assert ""Selected server(s) aren't under any group that you are a part of"" in res[""error""], res + user_result = admin_exec_api.user_list() + res = user_result.json() + user_ids = [result['id'] for result in res['results']] + template, r = run_api.group_add_user(group_id=0, user_ids=user_ids) + if 
run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + res = r.json() + assert res['error'] == 'Group does not exist' + test_assert.status(r, 400) " -/deploy/rest/details/{{UUID}}/,getting deployment details of a VM by Admin user,,"{ -""status"" : 200 -}","PARAMETERS = [{""dest_obj"": OBJ_DEPL}] - -@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) -def test_deploy_details_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): +/group/rest/add-user/{id}/,adding user id to group without Authorization ,"{group_id=0, +user_ids=[0] +}","{status"":401, +""message"":'Authentication credentials were not provided.' +}","def test_group_add_user_without_authorization(anonymous_exec_api): """""" - Details of the VM by Admin + Adding user id into group without Authorization """""" - # Admin check for fetching details of a Deployed VM created by different user. - deploy_id = custom_lib_non_admin_operations - r = run_api.deploy_details(deploy_id) - test_assert.status(r, 200) + template, r = anonymous_exec_api.group_add_user(group_id=0, user_ids=[0]) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == 'Authentication credentials were not provided.' " -/deploy/rest/details/{{UUID}}/,getting deployment details of a VM by manager who does not haverights over server,,,"endpoint = ""deploy_details"" +/group/rest/add/,"adding new group. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+",,"{ +""status"" : 201, +}","ef test_add_group(run_api, group_add): + """""" + Adding new Group + """""" + template, r = group_add + result = r.json() + if run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(template, result, ""group_add"") + test_assert.status(r, 201) -PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] + elif run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) -@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) -def test_deploy_details_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): """""" - Details of the VM by Manager + elif run_api.user_type == USER_TYPE[""manager""]: + test_assert.status(r, manager_rights_response(endpoint)) +" +/group/rest/add/,adding new group without authorization.,,"{ + ""status"" : 401, + ""message"" : ""Authentication credentials were not provided."" +}","def test_add_group_without_authorization(anonymous_exec_api): """""" - # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server - deploy_id = custom_lib_admin_operations - r = run_api.deploy_details(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + Adding new Group without Authorization + """""" + params, r = anonymous_exec_api.group_add() + result = r.json() + test_assert.status(r, 401) + assert result['detail'] == ""Authentication credentials were not provided."" - # When the user is part of the group that the manager manages but the deployment is not on manager rightful server - deploy_id = custom_lib_non_admin_operations - r = run_api.deploy_details(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True, 
manages_server=False)) " -/deploy/rest/details/{{UUID}}/,getting deployment details of a VM by manager who has rights over server,,,"endpoint = ""deploy_details"" - -PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""deploy_with"": SRV_MANAGER_RIGHTS}] - -@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -def test_deploy_details_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): +/group/rest/add/,"adding new group when invalid deployment strategy is provided. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ + ""name"": ""test_group"", + ""deployment_strategy"": ""invalid"" + }","{ + ""status"" : 400, + ""message"" : ""Invalid deployment_strategy"" +}","def test_add_group_invalid_deployment_strategy(run_api): """""" - Details of the VM by Manager + provide invalid deployment_strategy """""" - # When the user is not part of the group that the manager manages and deployment is on manager rights to server - deploy_id = custom_lib_admin_operations - r = run_api.deploy_details(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + group = { + ""name"": ""test_group"", + ""deployment_strategy"": ""invalid"" + } + params, r = run_api.group_add(template=group) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + result = r.json() + test_assert.status(r, 400) + assert result['error'] == ""Invalid deployment_strategy"", ""|> Json %s"" % result +" +/group/rest/add/,"adding new group when group name field is missing. 
Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ +""status"" : 400, +""message"" : ""Group Name is required and it can not be blank"" +}","def test_add_group_with_group_name_field_missing(run_api): + """""" + Adding new Group with group name field missing + """""" + params, r = run_api.group_add(template={}) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + result = r.json() + test_assert.status(r, 400) + assert result['error'] == ""Group Name is required and it can not be blank"" - # When the user is part of the group that the manager manages and deployment is on manager rights to server - deploy_id = custom_lib_non_admin_operations - r = run_api.deploy_details(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) " -/deploy/rest/details/{{UUID}}/,getting deployment details of a VM by non-admin user,,"{ -""status"" : 403 -}","PARAMETERS = [{""dest_obj"": OBJ_DEPL}] +/group/rest/add/,adding new group using invalid token. ,,"{ + ""status"" : 401, + ""message"" : ""Invalid token"" +}","def test_add_group_with_invalid_token(invalid_exec_api): + """""" + Adding new Group with invalid token + """""" + params, r = invalid_exec_api.group_add() + result = r.json() + test_assert.status(r, 401) + assert result['detail'] == ""Invalid token."" +" +/group/rest/add/,"adding new group by setting blank group name. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ + ""name"": """" +}","{ +""status"" : 400, +""message"" : ""Group Name is required and it can not be blank"" +}"," +def test_add_group_with_blank_group_name(run_api): + """""" + Adding new Group with blank group name + """""" + params, r = run_api.group_add(template={""name"": """"}) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + result = r.json() + test_assert.status(r, 400) + assert result['error'] == ""Group Name is required and it can not be blank"" -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) -def test_deploy_details_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): +" +/group/rest/add/,"adding new group by manager +",,"{ + ""status"" :403 +}","def test_add_group_by_manager(skip_if_not_manager, group_add): """""" - Details of the VM by non-Admin + when group name is provided """""" - # Non-admin check for fetching details of a Deployed VM created by different user. - deploy_id = custom_lib_admin_operations - r = run_api.deploy_details(deploy_id) + template, r = group_add test_assert.status(r, 403) " -/deploy/rest/details/{{UUID}}/,getting deployment details of a VM using invalid machine id,"{ -deploy_id = ""invalid"" +/group/rest/bulkdelete/,requesting bulk deletion of groups with invalid token,"{ +group_id_list=[0] }","{ -""status"" : 404 -}","def test_deploy_details_invalid_UUID(run_api): +""status"":401, +""message"": ""Invalid Token"" +}","def test_group_bulk_delete_with_invalid_token(invalid_exec_api): """""" - Getting Deploy details of the VM using invalid id + delete group in bulk with invalid token """""" - deploy_id = ""invalid"" - r = run_api.deploy_details(deploy_id) - test_assert.status(r, 404) + r = invalid_exec_api.group_bulk_delete(group_id_list=[0]) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == 'Invalid token.' 
" -/deploy/rest/details/{{UUID}}/,getting deployment details of a VM using invalid token,"{ -deploy_id = ""invalid"" -}","{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_deploy_details_invalid_token(invalid_exec_api): +/group/rest/bulkdelete/,"providing valid group ids. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{""status"": 202/403, +""message"":Accepted}","PARAMETERS = [{""action"": GROUP_BULK_DELETE}] + +@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) +def test_group_bulk_delete(run_api, custom_group_admin_operations): """""" - Getting Deploy details of the VM using invalid token + delete group in bulk """""" - deploy_id = ""invalid"" - depl_details = invalid_exec_api.deploy_details(deploy_id) - depl_json = depl_details.json() - test_assert.status(depl_details, 401) - assert depl_json[""detail""] == ""Invalid token."" + template, r = custom_group_admin_operations + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 202) " -/deploy/rest/details/{{UUID}}/,getting deployment details of a VM using valid machine id,,"{ -""status"" : 200 -}","def test_deploy_details(deploy_details): +/group/rest/bulkdelete/,"providing invalid ids. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ +invalid_group_list = [0] +}","{""status"":400/403, +""message"":Bad Request}","@pytest.mark.skip(reason=""Skipping this test because it is returning 207 at the place of 400"") +def test_group_bulk_delete_with_invalid_id(run_api): """""" - Getting Deploy details of the VM + delete groups in bulk with invalid id """""" - x, r = deploy_details - test_assert.status(r, 200) + invalid_group_list = [0] + r = run_api.group_bulk_delete(invalid_group_list) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 400) " -/deploy/rest/details/{{UUID}}/,getting deployment details of a VM without authorization,"{ -deploy_id = ""invalid"" +/group/rest/bulkdelete/,performing bulk delete without Authorization ,"{ +group_id_list = [0] }","{ -""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -}","def test_deploy_details_without_authorization(anonymous_exec_api): +""status"":401, +""message"":'Authentication credentials were not provided.' +}","def test_group_bulk_delete_without_authorization(anonymous_exec_api): """""" - Getting Deploy details of the VM without authorization + delete group in bulk without Authorization """""" - deploy_id = ""invalid"" - depl_details = anonymous_exec_api.deploy_details(deploy_id) - depl_json = depl_details.json() - test_assert.status(depl_details, 401) - assert depl_json[""detail""] == ""Authentication credentials were not provided."" + r = anonymous_exec_api.group_bulk_delete(group_id_list=[0]) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == 'Authentication credentials were not provided.' 
" -/deploy/rest/mac_addr/((UUID}}/,getting MAC address when invalid UUID is provided,"{ -deploy_id = ""invalid"" +/group/rest/delete/{id}/,requesting group deletion with invalid token,"{ +id=0 }","{ -""status"" : 404, -""message"" : ""Machine matching query does not exist"" -}","def test_deploy_mac_addr_invalid_UUID(run_api): + ""status"": 401, + ""message"": ""Invalid Token"" +}","def test_group_delete_with_invalid_token(invalid_exec_api): """""" - fetching the mac address of VM using invalid machine_id + Delete group with invalid token """""" - deploy_id = ""invalid"" - r = run_api.deploy_mac_addr(deploy_id) - test_assert.status(r, 404) - rjson = r.json() - assert ""Machine matching query does not exist"" in rjson[""error""], rjson - + r = invalid_exec_api.group_delete(id=0) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == 'Invalid token.' " -/deploy/rest/mac_addr/((UUID}}/,getting MAC address when requested using invalid token,"{ -deploy_id = ""invalid"" +/group/rest/delete/{id}/,"providing invalid id. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ +id=0 }","{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_deploy_mac_addr_invalid_token(invalid_exec_api): +""status"": 400/403, +""message"": ""Group that matches the query does not exist"" +}","def test_group_delete_with_invalid_id(run_api): """""" - fetching the mac address of VM using invalid token + Delete a group with invalid id """""" - deploy_id = ""invalid"" - depl_mac_addr = invalid_exec_api.deploy_mac_addr(deploy_id) - depl_json = depl_mac_addr.json() - test_assert.status(depl_mac_addr, 401) - assert depl_json[""detail""] == ""Invalid token."" + r = run_api.group_delete(id=0) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + res = r.json() + assert res['error'] == 'Group matching query does not exist.' + test_assert.status(r, 400) " -/deploy/rest/mac_addr/((UUID}}/,getting MAC address when valid UUID is provided and machine is connected to network.,,"{ -""status"" : 200, -""response"" : ""MAC address of VM -}","def test_deploy_mac_addr(deploy_mac_addr): +/group/rest/delete/{id}/,group deletion without authorization ,"{ +id=0 +}","{ + ""status"": 401, + ""message"" : 'Authentication credentials were not provided.' + +}"," +def test_group_delete_without_authorization(anonymous_exec_api): """""" - fetching the mac address of VM + Delete group without authorization """""" - x, r = deploy_mac_addr - test_assert.status(r, 200) + r = anonymous_exec_api.group_delete(id=0) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == 'Authentication credentials were not provided.' 
" -/deploy/rest/mac_addr/((UUID}}/,getting MAC address when valid UUID is provided and machine is not connected to network.,,"{ -""status"" : 400, -""message"" : ""Mac Addr can only be fetched when machine is in running/pausing/paused state"" -}","def test_deploy_mac_addr_stopped_machine(run_api, deploy_image): +/group/rest/delete/{id}/,"deleting group with valid group id. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ + ""status"": 204 +}","PARAMETERS = [{""action"": GROUP_DELETE}] + +@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) +def test_group_delete(run_api, custom_group_admin_operations): """""" - Get Mac Addr when machine is in stopped state + delete a group """""" - params, r = deploy_image - machine_id = r.json()[""UUID""] - res = run_api.deploy_mac_addr(deploy_id=machine_id) - test_assert.status(res, 400) - assert res.json()[""error""] == ""Mac Addr can only be fetched when machine is in running/pausing/paused state"" + template, r = custom_group_admin_operations + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 204) " -/deploy/rest/mac_addr/((UUID}}/,getting MAC address without Authorization,"{ -deploy_id = ""invalid"" +/group/rest/remove-server/{id}/,removing server from group without Authorization ,"{ +group_id = 0 }","{ ""status"" : 401, ""message"" : ""Authentication credentials were not provided"" -}","def test_deploy_mac_addr_without_authorization(anonymous_exec_api): +} +","def test_group_remove_server_without_authorization(anonymous_exec_api): """""" - fetching the mac address of VM without authorization + Remove server from group without authorization """""" - deploy_id = ""invalid"" - depl_mac_addr = anonymous_exec_api.deploy_mac_addr(deploy_id) - depl_json = depl_mac_addr.json() - 
test_assert.status(depl_mac_addr, 401) - assert depl_json[""detail""] == ""Authentication credentials were not provided."" + r = anonymous_exec_api.group_remove_server(group_id=0, params={""servers_list"": ['0']}) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Authentication credentials were not provided."" " -/deploy/rest/pause/{{UUID}}/,pausing a running VM when Invalid UUID is provided,"{ -deploy_id = ""invalid"" -}","{ -""status"" : 404, -""message"" : ""Machine matching query does not exist"" -}","def test_deploy_deploy_pause_invalid_UUID(run_api): +/group/rest/remove-server/{id}/,"removing server from group when server id is invalid.Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ +""status"" : 400/404 +}","@pytest.mark.skip(reason=""Skipping this test because of it is returning 207 in place 400/404"") +@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) +def test_group_remove_server_invalid_server_id(run_api, custom_group_admin_operations): """""" - Pausing the VM with invalid deploy_id + Remove server from group when server id is invalid """""" - deploy_id = ""invalid"" - r = run_api.deploy_pause(deploy_id, wait=False) - test_assert.status(r, 404) - rjson = r.json() - assert ""Machine matching query does not exist"" in rjson[""error""], rjson + params, r = custom_group_admin_operations + res = r.json() + group_id = res['id'] + r = run_api.group_remove_server(group_id, params={""servers_list"": [""0""]}) + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + status_code = r.status_code + assert status_code == 400 or status_code == 404 " -/deploy/rest/pause/{{UUID}}/,pausing a running VM when requested with invalid token,"{ -deploy_id = ""invalid"" +/group/rest/remove-server/{id}/,removing server from group when requested with invalid token,"{ 
+group_id = 0 }","{ ""status"" : 401, ""message"" : ""Invalid token"" -}","def test_deploy_deploy_pause_invalid_token(invalid_exec_api): - """""" - Pausing the VM using invalid tokens - """""" - deploy_id = ""invalid"" - depl_pause = invalid_exec_api.deploy_pause(deploy_id, wait=False) - depl_json = depl_pause.json() - test_assert.status(depl_pause, 401) - assert depl_json[""detail""] == ""Invalid token."" -" -/deploy/rest/pause/{{UUID}}/,pausing a running VM without Authorization,"{ -deploy_id = ""invalid"" -}","{ -""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -}","def test_deploy_deploy_pause_without_authorization(anonymous_exec_api): +} +","def test_group_remove_server_with_invalid_token(invalid_exec_api): """""" - Pausing the VM without authorization + Remove server from group with invalid token """""" - deploy_id = ""invalid"" - depl_pause = anonymous_exec_api.deploy_pause(deploy_id, wait=False) - depl_json = depl_pause.json() - test_assert.status(depl_pause, 401) - assert depl_json[""detail""] == ""Authentication credentials were not provided."" + r = invalid_exec_api.group_remove_server(group_id=0, params={""servers_list"": ['0']}) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Invalid token."" " -/deploy/rest/pause/{{UUID}}/,pausing VM using valid deploy_id by admin user,,"{ -""status"" : 201 -}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) -def test_deploy_deploy_pause_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): +/group/rest/remove-server/{id}/,"removing server from group when invalid group id is provided. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ + ""servers_list"": [""0""], + ""group_id"" = 0 + }","{ +""status"" : 400 +""message"" : ""Group does not exist"" +}","def test_group_remove_server_invalid_group_id(run_api, server_list): """""" - Pausing the VM by Admin + Remove server from group when group id is invalid """""" - # Admin check of Starting a deployment created by different user - deploy_id = custom_lib_non_admin_operations - r = run_api.deploy_pause(deploy_id) - test_assert.status(r, 201) + r = server_list + res = r.json() + list_server = [result['UUID'] for result in res['results']] + servers_list = { + ""servers_list"": list_server + } + r = run_api.group_remove_server(group_id=0, params={""servers_list"": servers_list}) + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + res = r.json() + assert res['error'] == ""Group does not exist"" + test_assert.status(r, 400) " -/deploy/rest/pause/{{UUID}}/,pausing VM using valid deploy_id by manager who do not have rights over server,,,"endpoint = ""deploy_pause"" +/group/rest/remove-server/{id}/,"removing server from group when both group and server id are valid. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+",,"{ +""status"" : 403 / 202 +}","PARAMETERS = [{""action"": GROUP_ADD_SERVER}] -PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] -@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) -def test_deploy_deploy_pause_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): +@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) +def test_group_remove_server(skip_if_manager, run_api, custom_group_admin_operations): """""" - Pausing the VM by manager when have no right on server + Remove Server in Group """""" - # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server - deploy_id = custom_lib_admin_operations - r = run_api.deploy_pause(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) - - # When the user is part of the group that the manager manages but the deployment is not on manager rightful server - deploy_id = custom_lib_non_admin_operations - r = run_api.deploy_pause(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) + params, r = custom_group_admin_operations + group_id = params[""group_id""] + server_list = params[""server_list""] + r = run_api.group_remove_server(group_id, params={""servers_list"": server_list}) + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 202) +" +/group/rest/remove-server/{id}/,"removing server from group when both group and server id are invalid. 
Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ +group_id = 0 +}","{ +""status"" : 400, +""message"" : ""Group does not exist"" +}","def test_group_remove_server_invalid_server_id_and_grp_id(run_api): + """""" + Remove server from group when both server id and group id is invalid + """""" + r = run_api.group_remove_server(group_id=0, params={""servers_list"": [""0""]}) + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + res = r.json() + assert res['error'] == ""Group does not exist"" + test_assert.status(r, 400) " -/deploy/rest/pause/{{UUID}}/,pausing VM using valid deploy_id by manager who has rights over server,,," -endpoint = ""deploy_pause"" - -PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""], ""deploy_with"": SRV_MANAGER_RIGHTS}] - -@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -def test_deploy_deploy_pause_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): +/group/rest/remove-user/{id}/,deleting user from group without Authorization ,"{ +group_id = 0, +""users_list"": [0] +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided."" +} +","def test_group_remove_user_without_authorization(anonymous_exec_api): """""" - Pausing the VM by manager when have right on server + Remove user from group without authorization """""" - # When the user is not part of the group that the manager manages - deploy_id = custom_lib_admin_operations - r = run_api.deploy_pause(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + r = anonymous_exec_api.group_remove_user(group_id=0, 
params={""users_list"": [0]}) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Authentication credentials were not provided."" +" +/group/rest/remove-user/{id}/,"deleting user from group when user id is invalid. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ +""status"" : 400 / 404 +}","PARAMETERS = [{""action"": GROUP_ADD}] - # When the user is part of the group that the manager manages and deployment is on manager rights to server - deploy_id = custom_lib_non_admin_operations - r = run_api.deploy_pause(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True))" -/deploy/rest/pause/{{UUID}}/,pausing VM using valid deploy_id by non-admin user,,"{ -""status"" : 403 -}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""]}] -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) -def test_deploy_deploy_pause_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): +@pytest.mark.skip(reason=""Skipping this test because of it is returning 207 in place 400/404"") +@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) +def test_group_remove_user_with_invalid_user_id(run_api, custom_group_admin_operations): """""" - Pausing the VM by non-admin + Remove user from group when invalid user id is provided """""" - # Non-admin check of Starting a deployment created by different user - deploy_id = custom_lib_admin_operations - r = run_api.deploy_pause(deploy_id) - test_assert.status(r, 403) + params, r = custom_group_admin_operations + res = r.json() + group_id = res['id'] + r = run_api.group_remove_user(group_id, params={""users_list"": [0]}) + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: + status_code = 
r.status_code + assert status_code == 400 or status_code == 404 " -/deploy/rest/pause/{{UUID}}/,pausing VM when valid UUID is provided and machine is in paused state,,"{ -""status"" : 400, -""message"" : ""Cannot perform pause operation on paused state of a machine"" -}","def test_deploy_deploy_pause_already_paused_vm(deploy_pause, run_api): +/group/rest/remove-user/{id}/,deleting user from group when requested with invalid token,"{ +group_id = 0, +""users_list"": [0] +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +} +","def test_group_remove_user_with_invalid_token(invalid_exec_api): """""" - Pausing a vm that is already paused + Remove user from group with invalid token """""" - x, r = deploy_pause + r = invalid_exec_api.group_remove_user(group_id=0, params={""users_list"": [0]}) res = r.json() - deploy_id = res[""UUID""] - response = run_api.deploy_pause(deploy_id) - test_assert.status(response, 400) - rjson = response.json()[""error""] - assert rjson == ""Cannot perform pause operation on paused state of a machine"" + test_assert.status(r, 401) + assert res['detail'] == ""Invalid token."" " -/deploy/rest/pause/{{UUID}}/,pausing VM when valid UUID is provided and machine state is in running,,"{ -""status"" : 20, -""response"" :Machine state should be set to paused -}","def test_deploy_deploy_pause_self(deploy_pause): +/group/rest/remove-user/{id}/,"deleting user from group when invalid group id is provided. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ +group_id = 0 +}","{ +""status"" : 400 / 403, +""message"" : ""Group does not exist"" +}","def test_group_remove_user_with_invalid_grp_id(run_api, admin_exec_api): """""" - Pausing the VM + Remove user from group when invalid group id is provided """""" - x, r = deploy_pause - test_assert.status(r, 201) + user_result = admin_exec_api.user_list() + res = user_result.json() + user_ids = [result['id'] for result in res['results']] + r = run_api.group_remove_user(group_id=0, params={""users_list"": user_ids}) + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: + res = r.json() + assert res['error'] == ""Group does not exist"" + test_assert.status(r, 400) " -/deploy/rest/reboot/{{UUID}}/,a non-admin user rebooting a VM when it is in running state using valid id,,"{ -""status"" : 403 -}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""]}] +/group/rest/remove-user/{id}/,"deleting user from group when both group and server id are valid. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ +group_id = 0 +}","{ +""status"" : 403 / 202 +}","@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) +def test_group_remove_user(skip_if_manager, run_api, custom_group_admin_operations): + """""" + Remove User in Group + """""" + params, r = custom_group_admin_operations + group_id = params[""group_id""] + user_list = params[""users_list""] + r = run_api.group_remove_user(group_id, params={""users_list"": user_list}) + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 202) -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) -def test_deploy_deploy_reboot_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): +" +/group/rest/remove-user/{id}/,"deleting user from group when both group and user id are invalid. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ +group_id = 0, +""users_list"": [0] +}","{ +""status"" : 400, +""message"" : ""Group does not exist"" +}","def test_group_remove_user_invalid_grp_and_user_id(run_api): """""" - Rebooting the VM by non-admin + Remove user from group when invalid user id and group id are provided """""" - # Non-admin check of Starting a deployment created by different user - deploy_id = custom_lib_admin_operations - r = run_api.deploy_reboot(deploy_id) - test_assert.status(r, 403) + r = run_api.group_remove_user(group_id=0, params={""users_list"": [0]}) + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: + res = r.json() + assert res['error'] == ""Group does not exist"" + test_assert.status(r, 400) " -/deploy/rest/reboot/{{UUID}}/,admin rebooting a VM when it is in running state using valid id,,"{ -""status"" : 201 -}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""]}] - -@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) -def test_deploy_deploy_reboot_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): +/ideploy/rest/delete/{UUID}/,Delete a Private Island which you are not an owner of but as admin,,"{ +""status"" : 201, +} +","@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_delete_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): """""" - Rebooting the VM by Admin + Deleting the Deployed Island image by Admin """""" - # Admin check of Starting a deployment created by different user - deploy_id = custom_lib_non_admin_operations - r = run_api.deploy_reboot(deploy_id) + deploy_id = custom_ilib_non_admin_operations + r = run_api.ideploy_delete(deploy_id) test_assert.status(r, 201) " -/deploy/rest/reboot/{{UUID}}/,manager rebooting a VM when it is in running state using valid id where the manager do not have rights 
over the servers,,,"endpoint = ""deploy_reboot"" - -PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] - -@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) -def test_deploy_deploy_reboot_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): +/ideploy/rest/delete/{UUID}/,Delete a Private Island which you are not an owner of and not as admin,,"{ +""status"" : 403, +} +","@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_delete_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): """""" - Rebooting the VM by manager when have no right on server + Deleting the Deployed Island image by non-Admin """""" - # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server - deploy_id = custom_lib_admin_operations - r = run_api.deploy_reboot(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) - - # When the user is part of the group that the manager manages but the deployment is not on manager rightful server - deploy_id = custom_lib_non_admin_operations - r = run_api.deploy_reboot(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) - + deploy_id = custom_ilib_admin_operations + r = run_api.ideploy_delete(deploy_id) + test_assert.status(r, 403) " -/deploy/rest/reboot/{{UUID}}/,manager rebooting a VM when it is in running state using valid id where the manager has rights over the servers,,,"endpoint = ""deploy_reboot"" - -PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""], ""deploy_with"": 
SRV_MANAGER_RIGHTS}] - -@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -def test_deploy_deploy_reboot_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): +/ideploy/rest/deploy/{UUID}/,successful deployment of an island,,"{ +""status"" : 200, +} +","def test_ideploy_deploy_self(ideploy_deploy): """""" - Rebooting the VM by manager when have right on server + Deploy Island image """""" - # When the user is not part of the group that the manager manages - deploy_id = custom_lib_admin_operations - r = run_api.deploy_reboot(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + template, r = ideploy_deploy + test_assert.status(r, 200) - # When the user is part of the group that the manager manages and deployment is on manager rights to server - deploy_id = custom_lib_non_admin_operations - r = run_api.deploy_reboot(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) - run_api.deploy_stop(deploy_id) " -/deploy/rest/reboot/{{UUID}}/,rebooting a running VM when Invalid UUID is provided,"{ -deploy_id = ""invalid"" -}","{ -""status"" : 404, -""message"" : ""Machine matching query does not exist"" -}","def test_deploy_deploy_reboot_invalid_UUID(run_api): +/ideploy/rest/deploy/{UUID}/,Select a server for deployment,,"{ +""status"" : 201, +} +","def test_ideploy_deploy_select_server(run_api, ilibrary_add_new_island): + params, r = ilibrary_add_new_island + uuid = r.json()[""uuid""] + deploy_on = list(run_api.clm_my_servers.keys()) + r = run_api.ideploy_deploy(uuid, deploy_on) + x = r.json() + deploy_id = x[""deploy_uuid""] + r = run_api.ideploy_delete(deploy_id) + test_assert.status(r, 201) +" +/ideploy/rest/deploy/{UUID}/,Select a group for deployment,,"{ 
+""status"" : 201, +} +","def test_ideploy_deploy_select_group(run_api, ilibrary_add_new_island): + params, r = ilibrary_add_new_island + uuid = r.json()[""uuid""] + grp_list = list(run_api.clm_my_groups.keys()) + r = run_api.ideploy_deploy(uuid, group_list=grp_list) + x = r.json() + deploy_id = x[""deploy_uuid""] + r = run_api.ideploy_delete(deploy_id) + test_assert.status(r, 201) +" +/ideploy/rest/deploy/{UUID}/,provide tags in param,"{ + ""tag_list"": ""custom_tags"", + }","{ +""status"" : 200, +} +","def test_ideploy_deploy_with_tags(ilibrary_add_new_island, run_api): """""" - Rebooting a VM using invalid UUID + provide tags in params """""" - deploy_id = ""invalid"" - r = run_api.deploy_reboot(deploy_id) - test_assert.status(r, 404) + params, r = ilibrary_add_new_island + uuid = r.json()[""uuid""] + tag_name = ""custom_tags"" + params = { + ""tag_list"": [tag_name] + } + r = run_api.ideploy_deploy(uuid, **params) rjson = r.json() - assert ""Machine matching query does not exist"" in rjson[""error""], rjson + test_assert.status(r, 200) + isl_details = run_api.ideploy_details(rjson['deploy_uuid']).json() + all_tags = [tag['value'] for tag in isl_details['tags']] + assert tag_name in all_tags, ""|> Json %s"" % rjson + run_api.ideploy_delete(rjson['deploy_uuid']) " -/deploy/rest/reboot/{{UUID}}/,rebooting a running VM when requested with invalid token,"{ -deploy_id = ""invalid"" +/ideploy/rest/deploy/{UUID}/,deploying an island without authorization,"{ +deploy_id=""invalid"" }","{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_deploy_deploy_reboot_invalid_token(invalid_exec_api): +""status"":401, +""message"" : ""Authentication credentials were not provided."" +}","def test_ideploy_deploy_without_authorization(anonymous_exec_api): """""" - Rebooting a VM using invalid token + deploy an island without authorization """""" deploy_id = ""invalid"" - depl_reboot = invalid_exec_api.deploy_reboot(deploy_id, wait=False) - depl_json = depl_reboot.json() 
- test_assert.status(depl_reboot, 401) - assert depl_json[""detail""] == ""Invalid token."" + idepl_deploy = anonymous_exec_api.ideploy_deploy(deploy_id, wait=False) + idepl_json = idepl_deploy.json() + test_assert.status(idepl_deploy, 401) + assert idepl_json[""detail""] == ""Authentication credentials were not provided."" " -/deploy/rest/reboot/{{UUID}}/,rebooting a running VM without authorization,"{ -deploy_id = ""invalid"" +/ideploy/rest/deploy/{UUID}/,deploying an island using invalid uuid,"{ +deploy_id=""invalid"" }","{ -""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -}","def test_deploy_deploy_reboot_without_authorization(anonymous_exec_api): +""status"":404, +""message"" : ""failure"" +}","def test_ideploy_deploy_invalid_uuid(run_api): """""" - Rebooting a VM without authorization + deploy with invalid uuid """""" deploy_id = ""invalid"" - depl_reboot = anonymous_exec_api.deploy_reboot(deploy_id, wait=False) - depl_json = depl_reboot.json() - test_assert.status(depl_reboot, 401) - assert depl_json[""detail""] == ""Authentication credentials were not provided."" + r = run_api.ideploy_deploy(deploy_id) + test_assert.status(r, 404) + res = r.json() + assert res[""result""] == 'FAILURE', res + assert 'does not exist' in res[""error""], res" +/ideploy/rest/deploy/{UUID}/,deploying an island by manager when the manager has the required permissions for deployment,,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_ideploy_deploy_manager_server_right(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): + """""" + Deploying an Island Image by manager when have right on server + """""" + # When the user is not part of the group that the manager manages + deploy_id = custom_ilib_admin_operations + r = run_api.ideploy_deploy(deploy_id) + 
test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + # When the user is part of the group that the manager manages and deployment is on manager rights to server + lib_id = custom_ilib_non_admin_operations + r = run_api.ideploy_deploy(lib_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) " -/deploy/rest/reboot/{{UUID}}/,rebooting a VM when it is in running state using valid id,,"{ -""status"" : 201 -}","def test_deploy_deploy_reboot_self(deploy_reboot): +/ideploy/rest/deploy/{UUID}/,deploying an island by manager when the manager do not have the required permissions for deployment,,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_ideploy_deploy_manager_no_server_right(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): """""" - Rebooting the VM + Deploying an Island Image by manager when have no server right """""" - r = deploy_reboot - test_assert.status(r, 201) + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + lib_id = custom_ilib_admin_operations + r = run_api.ideploy_deploy(lib_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + lib_id = custom_ilib_non_admin_operations + r = run_api.ideploy_deploy(lib_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) + " -/deploy/rest/reset/{{UUID}}/,resetting a VM when invalid UUID is provided,"{ -deploy_id = ""invalid"" +/ideploy/rest/deploy/{UUID}/,deploying a island machine when requested with invalid token,"{ +deploy_id 
=""invalid"" }","{ -""status"" : 404, -""message"" : ""Machine matching query does not exist"" -}","def test_deploy_deploy_reset_invalid_UUID(run_api): +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_ideploy_deploy_invalid_token(invalid_exec_api): """""" - resetting a VM for invalid UUID + deploying a island machine when requested with invalid token """""" deploy_id = ""invalid"" - r = run_api.deploy_reset(deploy_id, wait=False) - test_assert.status(r, 404) - rjson = r.json() - assert ""Machine matching query does not exist"" in rjson[""error""], rjson + idepl_deploy = invalid_exec_api.ideploy_deploy(deploy_id, wait=False) + idepl_json = idepl_deploy.json() + test_assert.status(idepl_deploy, 401) + assert idepl_json[""detail""] == ""Invalid token."" " -/deploy/rest/reset/{{UUID}}/,resetting a VM when requested with invalid token,"{ -deploy_id = ""invalid"" +/ideploy/rest/deploy/{UUID}/,"deploying a island machine using valid existing uuid and providing name param, where the name contains slash ","{ +name : ""test/island"" }","{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_deploy_deploy_reset_invalid_token(invalid_exec_api): + ""status"" : 404, + ""message"" : ""Name cannot contain `/`"" +}","def test_ideploy_deploy_name_contains_slash(ilibrary_add_new_island, run_api): """""" - resetting a VM for invalid token + name contains '/' """""" - deploy_id = ""invalid"" - depl_reset = invalid_exec_api.deploy_reset(deploy_id, wait=False) - depl_json = depl_reset.json() - test_assert.status(depl_reset, 401) - assert depl_json[""detail""] == ""Invalid token."" + params, r = ilibrary_add_new_island + uuid = r.json()[""uuid""] + r = run_api.ideploy_deploy(uuid, name=""test/island"") + rjson = r.json() + test_assert.status(r, 400) + assert rjson['error'] == ""Name cannot contain `/`"", ""|> Json %s"" % rjson + " -/deploy/rest/reset/{{UUID}}/,resetting a VM when valid UUID is provided,,"{ -""status"" : 201 -}","def 
test_deploy_deploy_reset_self(deploy_reset): +/ideploy/rest/deploy/{UUID}/,deploy with a name,"{ + ""name"": ""test_island"", +}","{ +""status"" : 201, +} +"," +def test_ideploy_deploy_with_name(run_api, ilibrary_add_new_island): """""" - Resetting the VM + island deployment using name parameter """""" - r = deploy_reset + params, r = ilibrary_add_new_island + uuid = r.json()[""uuid""] + r = run_api.ideploy_deploy(uuid, name=""test_island"") + x = r.json() + deploy_id = x[""deploy_uuid""] + r = run_api.ideploy_details(deploy_id) + rjson = r.json() + assert ""test_island"" in rjson[""island""][""name""], rjson + r = run_api.ideploy_delete(deploy_id) test_assert.status(r, 201) " -/deploy/rest/reset/{{UUID}}/,resetting a VM when valid UUID is provided by a non-admin user,,"{ -""status"" : 403 -}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""]}] - -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) -def test_deploy_deploy_reset_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): +/ideploy/rest/deploy/{UUID}/,deploy a Public Island with Admin rights but not owner,,"{ +""status"" : 200, +} +","@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_deploy_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): """""" - Resetting the VM by non-admin + Deploying an Island Image by Admin """""" - # Non-admin check of Starting a deployment created by different user - deploy_id = custom_lib_admin_operations - r = run_api.deploy_reset(deploy_id) - test_assert.status(r, 403) + lib_id = custom_ilib_non_admin_operations + r = run_api.ideploy_deploy(lib_id) + x = r.json() + deploy_id = x[""deploy_uuid""] + test_assert.status(r, 200) + run_api.ideploy_delete(deploy_id) " -/deploy/rest/reset/{{UUID}}/,resetting a VM when valid UUID is provided by an admin user,,"{ -""status"" : 201 -}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, 
""final_state"": DEPL_STATE[""running""]}] - -@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) -def test_deploy_deploy_reset_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): +/ideploy/rest/deploy/{UUID}/,deploy a Private Island which you are not an owner of and not as admin,,"{ +""status"" : 403, +} +","@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_deploy_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): """""" - Resetting the VM by Admin + Deploying an Island Image by Non-admin """""" - # Admin check of Starting a deployment created by different user - deploy_id = custom_lib_non_admin_operations - r = run_api.deploy_reset(deploy_id) - test_assert.status(r, 201) + lib_id = custom_ilib_admin_operations + r = run_api.ideploy_deploy(lib_id) + test_assert.status(r, 403) " -/deploy/rest/reset/{{UUID}}/,resetting a VM when valid UUID is provided by manager who do not have rights over servers,,,"endpoint = ""deploy_reset"" - -PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] - -@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) -def test_deploy_deploy_reset_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): +/ideploy/rest/deploy/{UUID}/,"deploy ""arch"":""aarch64"" type island and provide server which does not support it",,"{ +""status"" : 400, +""message"" : ""Either, Architecture of the selected Servers doesn't support 'aarch64' or the hvm_type of 'kvm' isn't supported"" +} +","def test_ideploy_island_with_aarch64(run_api, server_list_arm): """""" - Resetting the VM by manager when have no right on server + deploy ""arch"":""aarch64"" 
type island but server does not support it """""" - # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server - deploy_id = custom_lib_admin_operations - r = run_api.deploy_reset(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + params, r = run_api.library_add_new_vm(arch='aarch64') + rjson_lib = r.json() - # When the user is part of the group that the manager manages but the deployment is not on manager rightful server - deploy_id = custom_lib_non_admin_operations - r = run_api.deploy_reset(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) + machine = { + ""uuid"": rjson_lib[""uuid""], + ""nic_update_id"": rjson_lib[""hw""][""networks""][0][""id""], + ""nic_delete_id"": rjson_lib[""hw""][""networks""][2][""id""] + } + island_params = template_add_ilibrary_one_machine(machine=machine) + params, r_isl = run_api.ilibrary_add_new_island(params=island_params) + uuid = r_isl.json()[""uuid""] + server_list = server_list_arm + deploy_on = server_list if server_list else list(run_api.clm_my_servers.keys()) + r = run_api.ideploy_deploy(uuid, deploy_on=deploy_on, name=""test_island"") + if server_list: + test_assert.status(r, 200) + rjson = r.json() + deploy_id = rjson[""deploy_uuid""] + run_api.ideploy_delete(deploy_id) + else: + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""Either, Architecture of the selected Servers doesn't support 'aarch64' or the hvm_type of 'kvm' isn't supported"", ""|> Json %s"" % rjson + run_api.ilibrary_delete(uuid) + run_api.library_delete(rjson_lib[""uuid""]) " -/deploy/rest/reset/{{UUID}}/,resetting a VM when valid UUID is provided by manager who has rights over servers,,,"endpoint = ""deploy_reset"" - -PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""], ""deploy_with"": SRV_MANAGER_RIGHTS}] - 
-@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -def test_deploy_deploy_reset_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): - """""" - Resetting the VM by manager when have right on server +/ideploy/rest/details/{UUID}/,fetching the details of deployed island machine without authorization,"{ +deploy_id =""invalid"" +}","{ + ""status"" : 401, + ""message"" : ""Authentication credentials were not provided."" +}","def test_ideploy_details_without_authorization(anonymous_exec_api): """""" - # When the user is not part of the group that the manager manages - deploy_id = custom_lib_admin_operations - r = run_api.deploy_reset(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + without authorization + """""" + deploy_id = ""invalid"" + r = anonymous_exec_api.ideploy_details(deploy_id) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""The error message is %s"" % rjson['detail'] - # When the user is part of the group that the manager manages and deployment is on manager rights to server - deploy_id = custom_lib_non_admin_operations - r = run_api.deploy_reset(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) - run_api.deploy_stop(deploy_id) " -/deploy/rest/reset/{{UUID}}/,resetting a VM without authorization,"{ -deploy_id = ""invalid"" +/ideploy/rest/details/{UUID}/,fetching the details of deployed island machine when requested with invalid token,"{ +deploy_id =""invalid"" }","{ ""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -}","def test_deploy_deploy_reset_without_authorization(anonymous_exec_api): +""message"" : ""Invalid token"" 
+}","def test_ideploy_details_with_invalid_token(invalid_exec_api): """""" - resetting a VM without authorization + Invalid Token """""" deploy_id = ""invalid"" - depl_reset = anonymous_exec_api.deploy_reset(deploy_id, wait=False) - depl_json = depl_reset.json() - test_assert.status(depl_reset, 401) - assert depl_json[""detail""] == ""Authentication credentials were not provided.""" -/deploy/rest/resume/{{UUID}}/,resuming a paused VM when Invalid UUID is provided,"{ -deploy_id = ""invalid"" -}","{ -""status"" : 404, -""message"" : ""Machine matching query does not exist"" -}","def test_deploy_deploy_resume_invalid_UUID(run_api): + r = invalid_exec_api.ideploy_details(deploy_id) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""The error message is %s"" % rjson['detail'] + + +" +/ideploy/rest/details/{UUID}/,fetching the deployment details of deployed island machine using invalid uuid ,"{ +deploy_id =""invalid"" +} + +","{ + ""status"" : 404, + ""message"" : ""Deployment of Island with uuid:invalid doesn't exists"" +}","def test_ideploy_details_with_invalid_uuid(run_api): """""" - resuming a VM which is paused using invalid deploy_id + Details of Island uuid does not exists """""" deploy_id = ""invalid"" - r = run_api.deploy_resume(deploy_id, wait=False) + r = run_api.ideploy_details(deploy_id) test_assert.status(r, 404) rjson = r.json() - assert ""Machine matching query does not exist"" in rjson[""error""], rjson + assert rjson['error'] == ""Deployment of Island with uuid:invalid doesn't exists"", ""The error message is %s"" % rjson['error'] + " -/deploy/rest/resume/{{UUID}}/,resuming a paused VM when requested with invalid token,"{ -deploy_id = ""invalid"" -}","{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_deploy_deploy_resume_invalid_token(invalid_exec_api): +/ideploy/rest/details/{UUID}/,fetching details the deployed island machine using valid uuid,,"{ +""status"":200 +}","def 
test_ideploy_details(ideploy_details): """""" - resuming a paused VM using invalid token + Getting Island deploy details """""" - deploy_id = ""invalid"" - depl_resume = invalid_exec_api.deploy_resume(deploy_id, wait=False) - depl_json = depl_resume.json() - test_assert.status(depl_resume, 401) - assert depl_json[""detail""] == ""Invalid token."" + x, r = ideploy_details + test_assert.status(r, 200) " -/deploy/rest/resume/{{UUID}}/,resuming a paused VM without Authorization,"{ -deploy_id = ""invalid"" -}","{ -""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -}","def test_deploy_deploy_resume_without_authorization(anonymous_exec_api): +/ideploy/rest/details/{UUID}/,Confirm state transition from Deploying to Stopped,,Working as intended,"def test_ideploy_details_comfirm_state_from_deployimg_to_stop(ideploy_details): """""" - resuming a paused VM without authorization + Confirm state transition from Deploying to Stopped """""" - deploy_id = ""invalid"" - depl_resume = anonymous_exec_api.deploy_resume(deploy_id, wait=False) - depl_json = depl_resume.json() - test_assert.status(depl_resume, 401) - assert depl_json[""detail""] == ""Authentication credentials were not provided."" -" -/deploy/rest/resume/{{UUID}}/,"resuming a VM by a manager when valid UUID provided, machine is in paused state and manager has rights over servers",,," -endpoint = ""deploy_resume"" -PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""paused""], ""deploy_with"": SRV_MANAGER_RIGHTS}] + x, r = ideploy_details + rjson = r.json() + assert rjson['state'] == ""stopped"", 'The error is %s' % rjson['state'] -@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -def test_deploy_deploy_resume_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): 
+" +/ideploy/rest/details/{UUID}/,Confirm network segments that were deployed with the Island,,Attached as was intended,"def test_ideploy_details_comfirm_network_segments(ideploy_deploy, run_api): """""" - Resuming the VM by manager when have right on server + Confirm network segments that were deployed with the Island """""" - # When the user is not part of the group that the manager manages - deploy_id = custom_lib_admin_operations - r = run_api.deploy_resume(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + params, r = ideploy_deploy + network_segments = params['network_segments']['add'] + rjson = r.json() + deploy_id = rjson[""deploy_uuid""] + res = run_api.ideploy_details(deploy_id) + result = res.json() + r_network_segments = result['island']['network_segments'][2:] - # When the user is part of the group that the manager manages and deployment is on manager rights to server - deploy_id = custom_lib_non_admin_operations - r = run_api.deploy_resume(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) -" -/deploy/rest/resume/{{UUID}}/,"resuming a VM by a manager when valid UUID provided, machine is in paused state but manager do not have rights over servers",,,"endpoint = ""deploy_resume"" + for i, j in zip(network_segments, r_network_segments): + assert i.get('name') == j.get('name') + assert i.get('description') == j.get('description') + assert i.get('enable_ipv4') == j.get('enable_ipv4') -PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""paused""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] -@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) -def test_deploy_deploy_resume_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, 
custom_lib_non_admin_operations, run_api): + +" +/ideploy/rest/details/{UUID}/,Confirm Machines details with the deployment,,Created as was worked,"def test_ideploy_details_comfirm_machines_details(library_add_new_vm, run_api): """""" - Resuming the VM by manager when have no right on server + Confirm network segments that were deployed with the Island """""" - # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server - deploy_id = custom_lib_admin_operations - r = run_api.deploy_resume(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) - - # When the user is part of the group that the manager manages but the deployment is not on manager rightful server - deploy_id = custom_lib_non_admin_operations - r = run_api.deploy_resume(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) + params1, r1 = library_add_new_vm + island_name = rand_string() + mc_name = rand_string() + params = {""name"": island_name, + ""description"": f""This is description for {island_name}"", + ""is_public"": True, + ""machines"": {""add"": [{""uuid"": r1[""uuid""], ""name"": mc_name, ""description"": f""This is description for {mc_name}"", ""nics"": {""update"": []}, ""network_segments"": {""add"": []}}]}} + params, r = run_api.ilibrary_add_new_island(params=params) + island_uuid = r.json()[""uuid""] + res = run_api.ideploy_deploy(uuid=island_uuid) + deploy_uuid = res.json()[""deploy_uuid""] + r_details = run_api.ideploy_details(deploy_uuid) + result = r_details.json() + assert result['island']['name'] == f""{params['name']} #1"" + assert result[""island""]['description'] == params['description'] + assert result['machines'][0]['name'] == f""{params['machines']['add'][0]['name']} #1"" + assert result['machines'][0]['description'] == params['machines']['add'][0]['description'] + run_api.ideploy_delete(uuid=deploy_uuid) 
+ run_api.ilibrary_delete(uuid=island_uuid, params={}) " -/deploy/rest/resume/{{UUID}}/,resuming a VM by a non-admin user when valid UUID provided and machine is in paused state,,"{ -""status"" : 403 -}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""paused""]}] +/ideploy/rest/edit/{UUID}/,editing the deployed island machine when requested with invalid token,"deploy_id = ""invalid-deploy_uuid"" +edit_param = { +""name"": ""modified_colama"", +""description"": ""testing for edit"", +""allow_duplicate_network"": False +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_ideploy_edit_with_invalid_token(invalid_exec_api): + """""" + Editing the Island deploy details with invalid token + """""" + deploy_id = ""invalid-deploy_uuid"" + edit_param = {""name"": ""modified_colama"", + ""description"": ""testing for edit"", + 'allow_duplicate_network': False + } + r = invalid_exec_api.ideploy_edit(deploy_id, params=edit_param) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Invalid token."" -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) -def test_deploy_deploy_resume_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): +" +/ideploy/rest/edit/{UUID}/,editing the deployed island machine without authorization,"{ +""name"": ""modified_colama"", +""description"": ""testing for edit"", + 'allow_duplicate_network': False +}","{ + ""status"" : 401, + ""message"" : ""Authentication credentials were not provided."" +}","def test_ideploy_edit_without_authorization(anonymous_exec_api): """""" - Resuming the VM by non-admin + Editing the Island deploy details without authorization """""" - # Non-admin check of Starting a deployment created by different user - deploy_id = custom_lib_admin_operations - r = run_api.deploy_resume(deploy_id) - test_assert.status(r, 403) + deploy_id = ""invalid-deploy_uuid"" + edit_param = {""name"": ""modified_colama"", + 
""description"": ""testing for edit"", + 'allow_duplicate_network': False + } + r = anonymous_exec_api.ideploy_edit(deploy_id, params=edit_param) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Authentication credentials were not provided."" + " -/deploy/rest/resume/{{UUID}}/,resuming a VM by an admin user when valid UUID is provided and machine is in paused state,,"{ -""status"" : 201 -}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) -def test_deploy_deploy_resume_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): +/ideploy/rest/edit/{UUID}/,editing the deployment of island machine when requested with invalid token,"deploy_id = ""invalid-deploy_uuid"" +edit_param = { +""name"": ""modified_colama"", +""description"": ""testing for edit"", + ""allow_duplicate_network"": False +}","{ + ""status"" : 404, +}","def test_ideploy_edit_with_invalid_uuid(run_api): """""" - Resuming the VM by Admin + Editing the Island deploy details by invalid uuid """""" - # Admin check of Resuming a deployment created by different user - deploy_id = custom_lib_non_admin_operations - r = run_api.deploy_resume(deploy_id) - test_assert.status(r, 201) + deploy_id = ""invalid-deploy_uuid"" + edit_param = {""name"": ""modified_colama"", + ""description"": ""testing for edit"", + 'allow_duplicate_network': False + } + r = run_api.ideploy_edit(deploy_id, params=edit_param) + test_assert.status(r, 404) " -/deploy/rest/resume/{{UUID}}/,resuming a VM when valid UUID is provided and machine is in paused state,,"{ -""status"" : 200, -""response"" : Machine state set to running -}","def test_deploy_deploy_resume_self(deploy_resume): +/ideploy/rest/edit/{UUID}/,Editing a Deployment with no parameters,,"{ + ""status"" : 400, + ""message"" : ""This field is required"", +}","def test_ideploy_edit_no_parameters(run_api, ideploy_deploy): """""" - Resuming the VM + Editing the Island deploy details with No Parameters """""" 
- x, r = deploy_resume - test_assert.status(r, 201) -" -/deploy/rest/resume/{{UUID}}/,resuming a VM when valid UUID is provided and machine is in resumed state,,"{ -""status"" : 400, -""message"" : ""Cannot perform resume operation on running state of a machine"" -}","def test_deploy_deploy_resume_already_resumed_vm(deploy_resume, run_api): + param, result = ideploy_deploy + rjson = result.json() + deploy_id = rjson[""deploy_uuid""] + edit_param = {} + r = run_api.ideploy_edit(deploy_id, params=edit_param) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['allow_duplicate_network'] == ['This field is required.'] + + +" +/ideploy/rest/edit/{UUID}/,Edit a Deployment which you are not an owner of but with Admin rights,"{ +""name"": ""modified_colama"", +""description"": ""testing for edit"", +""allow_duplicate_network"": False +}","{ + ""status"" : 202, +}","@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_edit_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): """""" - resuming a VM which is already resumed + Changing of Island Deployed Image by Admin """""" - x, r = deploy_resume - res = r.json() - deploy_id = res[""UUID""] - response = run_api.deploy_resume(deploy_id) - test_assert.status(response, 400) - rjson = response.json()[""error""] - assert rjson == ""Cannot perform resume operation on running state of a machine"" + ideploy_id = custom_ilib_non_admin_operations + edit_param = {""name"": ""modified_colama"", ""description"": ""testing for edit"", 'allow_duplicate_network': False} + r = run_api.ideploy_edit(ideploy_id, params=edit_param) + test_assert.status(r, 202) " -/deploy/rest/shutdown/{{UUID}}/,shutting down the deployment of machine without Authorization,"{ - deploy_id = ""invalid"" +/ideploy/rest/edit/{UUID}/,Edit a Deployment which you are not an owner of and without Admin rights,"{ +""name"": ""modified_colama"", +""description"": ""testing for edit"", 
+""allow_duplicate_network"": False }","{ -""status"" : 401, -""message"" : ""Authentication credentials were not provided"" + ""status"" : 403, }"," -def test_deploy_shutdown_without_authorization(anonymous_exec_api): +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_edit_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): """""" - Shutdown the VM without authorization + Changing of Island Deployed Image by non-Admin """""" - deploy_id = ""invalid"" - depl_shutdown = anonymous_exec_api.deploy_shutdown(deploy_id, wait=False) - depl_json = depl_shutdown.json() - test_assert.status(depl_shutdown, 401) - assert depl_json[""detail""] == ""Authentication credentials were not provided."" + # Non-admin check for changing details of a Deployed Image created by different user. + ideploy_id = custom_ilib_admin_operations + edit_param = {""name"": ""modified_colama"", ""description"": ""testing for edit"", 'allow_duplicate_network': False} + r = run_api.ideploy_edit(ideploy_id, params=edit_param) + test_assert.status(r, 403) " -/deploy/rest/shutdown/{{UUID}}/,"shutting down the deployment of machine by manager when valid UUID is provided ,machine is in running state and manager has rights over servers",,,"PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""], ""deploy_with"": SRV_MANAGER_RIGHTS}] - -@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -def test_deploy_shutdown_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): +/ideploy/rest/edit/{UUID}/,adding duplicate MACs inside NIC of same Machine and set allow_duplicate_network as false,,"{ + ""status"" : 400, +}"," +def test_ideploy_edit_add_duplicate_mac_in_same_machine(run_api): """""" - Shutdown the VM by 
manager when have right on server + Editing the Island deploy details by Add duplicate MACs inside NIC of same Machine and set allow_duplicate_network as false """""" - # When the user is not part of the group that the manager manages - deploy_id = custom_lib_admin_operations - r = run_api.deploy_shutdown(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) - - # When the user is part of the group that the manager manages and deployment is on manager rights to server - deploy_id = custom_lib_non_admin_operations - r = run_api.deploy_shutdown(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) -" -/deploy/rest/shutdown/{{UUID}}/,"shutting down the deployment of machine by manager when valid UUID is provided ,machine is in running state but manager do not have rights over servers",,,"PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] + networks = template_networks() + if run_api.arch_type == ""aarch64"": + params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + else: + params1, r1 = run_api.library_add_new_vm(networks=networks) -@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) -def test_deploy_shutdown_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): - """""" - Rebooting the VM by manager when have no right on server - """""" - # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server - deploy_id = custom_lib_admin_operations - r = run_api.deploy_shutdown(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, 
manages_user=False, manages_server=False)) + params3 = { + ""name"": ""test_ideploy"", + ""machines"": { + ""add"": [ + { + ""uuid"": r1.json()[""uuid""] + } + ] + }, + ""is_public"": False + } + params3, r3 = run_api.ilibrary_add_new_island(params=params3) + rjson3 = r3.json() + uuid = rjson3['uuid'] - # When the user is part of the group that the manager manages but the deployment is not on manager rightful server - deploy_id = custom_lib_non_admin_operations - r = run_api.deploy_shutdown(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) -" -/deploy/rest/shutdown/{{UUID}}/,shutting down the deployment of machine by non-admin when valid UUID is provided and machine is in running state ,,"{ -""status"" : 403 -}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""running""]}] + r = run_api.ideploy_deploy(uuid) + deploy_id = r.json()['deploy_uuid'] + r = run_api.ideploy_details(deploy_id) + rjson = r.json() + machine_uuid = rjson['machines'][0]['uuid'] + machine_mac = rjson['machines'][0]['machine']['hw']['networks'][0]['mac'] + island_uuid = rjson['island']['uuid'] -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) -def test_deploy_shutdown_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): - """""" - Shutdown the VM by non-admin - """""" - # Non-admin check of shutdown a deployment created by different user - deploy_id = custom_lib_admin_operations - r = run_api.deploy_shutdown(deploy_id) - test_assert.status(r, 403) + params3 = { + ""updated_machines"": [ + { + ""uuid"": machine_uuid, + 'nics': { + 'add': [ + { + ""model"": ""virtio"", + 'mac': machine_mac + } + ] + } + } + ], + 'allow_duplicate_network': False + } + r = run_api.ideploy_edit(deploy_id, params=params3) + test_assert.status(r, 400) + res = r.json() + assert res['error'] == ""Island ["" + island_uuid + ""] getting duplicate mac_addresses. 
Use `allow_duplicate_network` to force continue...""
+    run_api.ideploy_delete(deploy_id)
+    if 'error' not in rjson3.keys():
+        run_api.ilibrary_delete(uuid, params3)
+    run_api.library_delete(r1.json()[""uuid""])
 "
-/deploy/rest/shutdown/{{UUID}}/,shutting down the deployment of machine that is in running state,,"{
-""status"" : 201,
-""response"" : Machine shutdown
-}","def test_deploy_shutdown_self(deploy_shutdown):
+/ideploy/rest/edit/{UUID}/,Add duplicate MACs inside NIC of different Machines and set allow_duplicate_network as false ,,"{
+    ""status"" : 400,
+}","
+def test_ideploy_edit_update_duplicate_mac_in_different_machines(run_api):
     """"""
-    Shutdown the VM
+    Editing the Island deploy details by Add duplicate MACs inside NIC of different Machines and set allow_duplicate_network as false
     """"""
-    param, r = deploy_shutdown
-    test_assert.status(r, 201)
+    networks = template_networks()
+    if run_api.arch_type == ""aarch64"":
+        params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"")
+        params2, r2 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"")
+    else:
+        params1, r1 = run_api.library_add_new_vm(networks=networks)
+        params2, r2 = run_api.library_add_new_vm(networks=networks)
+
+    params3 = {
+        ""name"": ""test_ideploy"",
+        ""machines"": {
+            ""add"": [
+                {
+                    ""uuid"": r1.json()[""uuid""]
+                },
+                {
+                    ""uuid"": r2.json()[""uuid""]
+                }
+            ]
+        },
+        ""is_public"": False
+    }
+    params3, r3 = run_api.ilibrary_add_new_island(params=params3)
+    rjson3 = r3.json()
+    uuid = rjson3['uuid']
+    r = run_api.ideploy_deploy(uuid)
+    deploy_id = r.json()['deploy_uuid']
+    r = run_api.ideploy_details(deploy_id)
+    rjson = r.json()
+    machine1_uuid = rjson['machines'][0]['uuid']
+    machine2_uuid = rjson['machines'][1]['uuid']
+    island_uuid = rjson['island']['uuid']
+    params3 = {
+        ""updated_machines"": [
+            {
+                ""uuid"": machine1_uuid,
+                'nics': {
+                    'add': [
+                        {
+                            ""model"": ""virtio"",
+                            'mac': 
'56:54:00:0C:8A:4A' + } + ] + } + }, + { + ""uuid"": machine2_uuid, + 'nics': { + 'add': [ + { + ""model"": ""virtio"", + 'mac': '56:54:00:0C:8A:4A' + } + ] + } + } + ], + 'allow_duplicate_network': False + } + r = run_api.ideploy_edit(deploy_id, params=params3) + test_assert.status(r, 400) + res = r.json() + assert res['error'] == ""Island ["" + island_uuid + ""] getting duplicate mac_addresses. Use `allow_duplicate_network` to force continue..."" + run_api.ideploy_delete(deploy_id) + if 'error' not in rjson3.keys(): + run_api.ilibrary_delete(uuid, params3) + run_api.library_delete(r1.json()[""uuid""]) + run_api.library_delete(r2.json()[""uuid""]) " -/deploy/rest/shutdown/{{UUID}}/,shutting down the deployment of machine when no machine exists for the deploy id,"{ - deploy_id = ""invalid"" -}","{ -""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -}","def test_deploy_shutdown_invalid_UUID(run_api): +/ideploy/rest/list/,successfully fetching the list of deployed islands using invalid uuid,,"{ +""status"":200, +""response"": list of deployed islands +}","def test_ideploy_list_invalid_uuid(run_api): """""" - Shutdown the VM using id for which machine does not exist + Fetching the list of deployed islands using invalid uuid """""" - deploy_id = ""invalid"" - r = run_api.deploy_shutdown(deploy_id, wait=False) - test_assert.status(r, 404) - rjson = r.json() - assert ""Machine matching query does not exist"" in rjson[""error""], rjson + params = {""uuid"": ""invalid""} + r = run_api.ideploy_list(params) + test_assert.status(r, 200) " -/deploy/rest/shutdown/{{UUID}}/,shutting down the deployment of machine when requested with invalid token,"{ - deploy_id = ""invalid"" -}","{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_deploy_shutdown_invalid_token(invalid_exec_api): +/ideploy/rest/list/,successfully fetching the list of deployed islands using invalid name,"{""name"": ""invalid""}","{ +""status"":200, +""response"": 
list of deployed islands +}"," +def test_ideploy_list_invalid_name(run_api): + params = {""name"": ""invalid""} + r = run_api.ideploy_list(params) + test_assert.status(r, 200) +" +/ideploy/rest/list/,"successfully fetching the list of deployed islands by adding filters. Check the user type before performing the operation. +",,"{ +""status"":200, +""response"": list of deployed islands +}","def test_ideploy_list_filter(run_api): """""" - Shutdown the VM using invalid token + Fetching the list of deployed islands by adding filters """""" - deploy_id = ""invalid"" - depl_shutdown = invalid_exec_api.deploy_shutdown(deploy_id, wait=False) - depl_json = depl_shutdown.json() - test_assert.status(depl_shutdown, 401) - assert depl_json[""detail""] == ""Invalid token."" + params, res, isl_res = [], [], [] + ideploy_count = 10 + arch = run_api.arch_type + prefix_name = f""filter_island_2_{rand_string()}_"" + isl_lib_name = [f""{prefix_name}{rand_string()}"" for _ in range(ideploy_count)] + networks = template_networks() + if arch == ""aarch64"": + params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + params2, r2 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + params3, r3 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + else: + params1, r1 = run_api.library_add_new_vm(networks=networks) + params2, r2 = run_api.library_add_new_vm(networks=networks) + params3, r3 = run_api.library_add_new_vm(networks=networks) + machine1 = { + ""uuid"": r1.json()[""uuid""], + ""nic_update_id"": r1.json()[""hw""][""networks""][0][""id""], + ""nic_delete_id"": r1.json()[""hw""][""networks""][2][""id""] + } + machine2 = { + ""uuid"": r2.json()[""uuid""], + ""nic_update_id"": r2.json()[""hw""][""networks""][1][""id""], + ""nic_delete_id"": r2.json()[""hw""][""networks""][0][""id""] + } + machine3 = { + ""uuid"": r3.json()[""uuid""], + 
""nic_update_id"": r3.json()[""hw""][""networks""][2][""id""], + ""nic_delete_id"": r3.json()[""hw""][""networks""][1][""id""] + } + for i in range(ideploy_count): + param, r = run_api.ilibrary_add_new_island(machine1=machine1, machine2=machine2, + machine3=machine3, name=isl_lib_name[i]) + isl_uuid = r.json()[""uuid""] + params.append(param) + res.append(r) + isl_r = run_api.ideploy_deploy(isl_uuid) + isl_res.append(isl_r) + random_int = randint(0, 9) + name_filter = {""name"": params[random_int].get(""name"") + "" #1"", ""page_size"": ideploy_count} + uuid_filter = {""uuid"": isl_res[random_int].json().get(""deploy_uuid""), ""page_size"": ideploy_count} + owner_filter = {""owner"": ""colama"" if run_api.user_type == ""admin"" + else ""vivekt"" if run_api.user_type == ""non-admin"" + else ""manager"", ""search"": prefix_name, ""page_size"": ideploy_count} + exp_res = { + 0: [i.get(""name"") for i in params if i.get(""name"") + "" #1"" == name_filter.get(""name"")], + 1: [i.json().get(""deploy_uuid"") for i in isl_res if i.json().get(""deploy_uuid"") == uuid_filter.get(""uuid"")], + 2: [i.json().get(""owner"") for i in res], + } + filters = [name_filter, uuid_filter, owner_filter] + for filter in range(len(filters)): + r = run_api.ideploy_list(filters[filter]) + # check for valid response data with the filter parameters + if len(r.json().get(""results"")) != len(exp_res[filter]): + logging.error(f""error in filter: {filters[filter]}, the list of expected result for the filter is: {exp_res[filter]}, and the actual result is {r.json()}"") + assert False + test_assert.status(r, 200) + run_api.library_delete(r1.json()[""uuid""], params1) + run_api.library_delete(r2.json()[""uuid""], params2) + run_api.library_delete(r3.json()[""uuid""], params3) + for i in range(ideploy_count): + isl_rjson = isl_res[i].json() + if 'error' not in isl_rjson.keys(): + uuid = isl_rjson[""deploy_uuid""] + run_api.ideploy_delete(uuid) + ilib_rjson = res[i].json() + if 'error' not in 
ilib_rjson.keys(): + uuid = ilib_rjson[""uuid""] + run_api.ilibrary_delete(uuid, params[i]) " -/deploy/rest/shutdown/{{UUID}}/,shutting down the deployment of machine when valid UUID is provided and machine is in paused state,,"{ -""status"" : 400, -""message"" : ""Cannot perform shutdown operation on paused state of a machine"" -}","def test_deploy_shutdown_paused_vm(deploy_start, run_api): +/ideploy/rest/list/,successfully fetching the list of deployed islands,,"{ +""status"":200, +""response"": list of deployed islands +}","def test_ideploy_list(ideploy_list): """""" - Shutdown the VM which is in pause state + Fetching the list of deployed islands """""" - x, r = deploy_start - deploy_id = x[""UUID""] - run_api.deploy_pause(deploy_id, wait=True) - res = run_api.deploy_shutdown(deploy_id) - test_assert.status(res, 400) - assert res.json()[""error""] == ""Cannot perform shutdown operation on paused state of a machine. Try `STOP` instead."" + template, r = ideploy_list + test_assert.status(r, 200) " -/deploy/rest/shutdown/{{UUID}}/,shutting down the deployment of machine when valid UUID is provided and machine is in stopped state,,"{ -""status"" : 400, -""message"" : ""Cannot perform shutdown operation on stopped state of a machine"" -}","def test_deploy_shutdown_already_stopped_vm(run_api, deploy_stop): +/ideploy/rest/list/,fetching the list of deployed islands which is filtered on created and update DateTime Filter,"{ + uuid = 'valid-deployment-uuid' +}",,"def test_ideploy_filter_timefilter(run_api: apiops, ilibrary_add_new_island): """""" - Shutdown the VM that is in stopped state + Filter on created and update DateTime Filter """""" - x, result = deploy_stop - deploy_id = x[""UUID""] - response = run_api.deploy_shutdown(deploy_id) + template, r = ilibrary_add_new_island + rjson = r.json() + ilib_id = rjson[""uuid""] + r = run_api.ideploy_deploy(ilib_id) + ideploy_id = r.json()[""deploy_uuid""] + r_details = run_api.ideploy_details(ideploy_id).json() + # utime 
and ctime is returned in the form '2023-09-14T17:59:39.173594Z' which needs conversion to '2023-09-14 17:59:39.173594' + str_ctime = r_details['ctime'].replace('T', ' ').replace('Z', '') + datetime_ctime = convert_datetime_stringform(r_details['ctime']) + + def handle_trigger_delay_filtering_for_created_on(created_start_date, created_end_date, created_date_range, utc=True): + """""" + Function to handle corner case if machine was created a day before and test get triggered on new day + """""" + if not utc: + created_start_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + created_end_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": created_start_date, ""created_end_date"": created_end_date, + ""created_date_range"": created_date_range, ""page_size"": 1}).json()['count'] == 1 + # Filter on UTC time + # .... When the datetime is selected to be the same as in detail + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": str_ctime, + ""created_end_date"": str_ctime, ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""page_size"": 1}).json()['count'] == 0 + # ........ 
When the datetime is selected a 1 microsecond less than from the detail + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), + ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""page_size"": 1}).json()['count'] == 1 + # ........ When the datetime is selected a 1 microsecond less than from the detail + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), + ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date' and 'created_end_date' when passed blank string + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": """", ""created_end_date"": """", ""page_size"": 1}).json()['count'] == 1 + # ........Filter on 'created_start_date' and 'created_end_date' when created_start_date is greater than created_end_date + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. 
+ # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'today + try: + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'yesterday + try: + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'week + try: + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on week's last day at 23:59:59.9999999 and test get triggered on new week at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'month + try: + assert 
run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on month's last day at 23:59:59.9999999 and test get triggered on new month at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'year' + try: + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on year last day at 23:59:59.9999999 and test get triggered on new year at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # .........When the created_date_range format is invalid + response = run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""invalid"", ""page_size"": 1}) test_assert.status(response, 400) - rjson = response.json()[""error""] - assert rjson == ""Cannot perform shutdown operation on stopped state of a machine"" + assert response.json()['created_date_range'][0] == 'Select a valid choice. 
invalid is not one of the available choices.', ""The json is %s"" % response.json() + # .........When the created_start_date and created_end_date has whitespaces in them + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": "" "" + str_ctime + "" "", ""created_end_date"": "" "" + str_ctime + "" "", ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + + # Filter on IST time + # .... When the datetime is selected to be the same as in detail but having TimeZone of +0530 + # ........ When the datetime is selected to be the same as in detail but with tzone IST, i.e., timedelta being 19800 equivalent to +05:30 + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 + # ........ When the datetime is selected to be the same as in detail but with tzone IST, i.e., timedelta being 19800 equivalent to +05:30 + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 0 + # ........ 
When the datetime is selected to be the same as in detail but with tzone IST, i.e., timedelta being 19800 equivalent to +05:30 but increase microsecond by 1 to fail filter + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 1 + # ........ When the datetime is selected a 1 microsecond less than from the detail + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. 
+ # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'today + try: + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 IST and test get triggered at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'yesterday + try: + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 IST and test get triggered at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'week + try: + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""week"", 
""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on week's last day at 23:59:59.9999999 IST and test get triggered on new week at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'month + try: + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on month's last day at 23:59:59.9999999 IST and test get triggered on new month at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'year + try: + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on year last day at 23:59:59.9999999 IST and test get triggered on new year at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # .........When the created_date_range format is invalid + response = 
run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""invalid"", ""page_size"": 1}) + test_assert.status(response, 400) + assert response.json()['created_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() + # .........When the created_start_date and created_end_date has whitespaces in them + assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", ""created_end_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", + ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + run_api.ideploy_delete(ideploy_id) " -/deploy/rest/snapshot/{{UUID}}/,taking snapshot of the virtual machine by Admin user using valid UUID and machine state is in stopped state,,"{ -""status"" : 201, -""response"" : Snapshot taken -}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""]}] - -@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) -def test_deploy_snapshot_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): +/ideploy/rest/list/,fetching the list of deployed islands when provided with tag value,"{ +""tags"": tag_value +}","{ + ""status"": 200, + ""response"": list of deployed islands +}","def test_ideploy_list_by_tag_value(run_api, ideploy_details): """""" - Snapshot the VM by Admin + when provided with tag value """""" - # Admin check of Starting a deployment created by different user - deploy_id = custom_lib_non_admin_operations - r = run_api.deploy_snapshot(deploy_id) - test_assert.status(r, 201) + x, r = 
ideploy_details + detail = r.json() + tag_value = detail['tags'][0]['value'] + params = {""tags"": tag_value} + r = run_api.ideploy_list(params) + test_assert.status(r, 200) rjson = r.json() - run_api.library_delete(rjson['snapshotted_machine_UUID'], {}) -" -/deploy/rest/snapshot/{{UUID}}/,"taking snapshot of the virtual machine by manager using valid UUID, machine state is in stopped state and the manager has rights over the servers",,,"endpoint = ""deploy_snapshot"" -PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_MANAGER_RIGHTS}] - -@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) - -def test_deploy_snapshot_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): - """""" - Snapshot the VM by manager when have right on server - """""" - # When the user is not part of the group that the manager manages - deploy_id = custom_lib_admin_operations - r = run_api.deploy_snapshot(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) - - # When the user is part of the group that the manager manages and deployment is on manager rights to server - deploy_id = custom_lib_non_admin_operations - r = run_api.deploy_snapshot(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) - run_api.deploy_stop(deploy_id) -" -/deploy/rest/snapshot/{{UUID}}/,"taking snapshot of the virtual machine by manager using valid UUID, machine state is in stopped state but the manager do not have rights over the server",,,"endpoint = ""deploy_snapshot"" - -PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] - 
-@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) -def test_deploy_snapshot_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): - """""" - Snapshot the VM by manager when have no right on server - """""" - # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server - deploy_id = custom_lib_admin_operations - r = run_api.deploy_snapshot(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) - - # When the user is part of the group that the manager manages but the deployment is not on manager rightful server - deploy_id = custom_lib_non_admin_operations - r = run_api.deploy_snapshot(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) -" -/deploy/rest/snapshot/{{UUID}}/,taking snapshot of the virtual machine by non-admin user using valid UUID and machine state is in stopped state,,"{ -""status"" : 403 -}","PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""]}] - -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) -def test_deploy_snapshot_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): - """""" - Snapshot the VM by non-admin - """""" - # Non-admin check of Starting a deployment created by different user - deploy_id = custom_lib_admin_operations - r = run_api.deploy_snapshot(deploy_id) - test_assert.status(r, 403) + for island in rjson['results']: + result = run_api.ideploy_details(island['uuid']).json() + assert tag_value == result['tags'][0]['value'], ""|> Json %s"" % result " -/deploy/rest/snapshot/{{UUID}}/,taking snapshot of the virtual machine using a valid UUID and machine state is in stopped 
state,,"{ -""status"" : 201, -""response"" : Snapshot taken -}","def test_deploy_snapshot_self(deploy_snapshot): - """""" - Snapshot the VM - """""" - r = deploy_snapshot - test_assert.status(r, 201)" -/deploy/rest/snapshot/{{UUID}}/,taking snapshot of the virtual machine when Invalid UUID is provided,"{ -deploy_id = ""invalid"" +/ideploy/rest/list/,"fetching the list of deployed islands when provided with tag name in [""_sessionid"", ""_session_name"", ""_session_created_on""]","{ +tag_name = ""valid-name"" }","{ -""status"" : 404, -""message"" : ""Machine matching query does not exist""","def test_deploy_snapshot_invalid_UUID(run_api): + ""status"": 200, + ""response"": list of deployed islands +}","def test_ideploy_list_by_tag_name(run_api, ideploy_details): """""" - Snapshot of the machine using an invalid machine uuid + when provided with tag name """""" - deploy_id = ""invalid"" - r = run_api.deploy_snapshot(deploy_id, wait=False) - test_assert.status(r, 404) + x, r = ideploy_details + detail = r.json() + tag_value = detail['tags'][0]['name'] + params = {""tags"": tag_value} + r = run_api.ideploy_list(params) + test_assert.status(r, 200) rjson = r.json() - assert ""Machine matching query does not exist"" in rjson[""error""], rjson + for island in rjson['results']: + result = run_api.ideploy_details(island['uuid']).json() + assert tag_value == result['tags'][0]['name'], ""|> Json %s"" % result " -/deploy/rest/snapshot/{{UUID}}/,taking snapshot of the virtual machine when requested using invalid token,"{ -deploy_id = ""invalid"" +/ideploy/rest/list/,fetching the list of deployed islands when no Token Provided,"{ + uuid = 'valid-existing-island-library-uuid' }","{ ""status"" : 401, -""message"" : ""Invalid token"" -}","def test_deploy_snapshot_invalid_token(invalid_exec_api): +""message"" : ""Authentication credentials were not provided."" +}","def test_ideploy_list_without_authorization(anonymous_exec_api): """""" - Snapshot of the machine using invalid tokens 
+ Fetching the list of deployed islands without authorization """""" - deploy_id = ""invalid"" - depl_snapshot = invalid_exec_api.deploy_snapshot(deploy_id, wait=False) - depl_json = depl_snapshot.json() - test_assert.status(depl_snapshot, 401) - assert depl_json[""detail""] == ""Invalid token."" + idepl_list = anonymous_exec_api.ideploy_list() + idepl_json = idepl_list.json() + test_assert.status(idepl_list, 401) + assert idepl_json[""detail""] == ""Authentication credentials were not provided."" + " -/deploy/rest/snapshot/{{UUID}}/,taking snapshot of the virtual machine when valid UUID is provided and machine is in running state,,"{ -""status"" : 400, -""message"" : ""Cannot perform snapshot operation on running state of a machine""","def test_deploy_snapshot_running_vm(run_api, deploy_start): +/ideploy/rest/list/,fetching the list of deployed islands by user who does not own the deployed image,"{ + uuid = 'valid-deployment-uuid' +}","{ + ""status"": 200, + ""response"": list of deployed islands +}","def test_ideploy_list_island_not_owner(skip_if_non_admin, non_admin_exec_api, ideploy_deploy): """""" - Snapshot of the machine which is in running state + Fetching the list of deployed islands of other user """""" - x, result = deploy_start - deploy_id = x[""UUID""] - response = run_api.deploy_snapshot(deploy_id, wait=False) - test_assert.status(response, 400) - rjson = response.json()[""error""] - assert rjson == ""Cannot perform snapshot operation on running state of a machine"" + template, r = ideploy_deploy + idepl_list = non_admin_exec_api.ideploy_list() + user = non_admin_exec_api.user + owners = [result['island']['owner'] for result in idepl_list.json()['results'] if result['island']['owner'] != user] + test_assert.status(idepl_list, 200) + assert len(owners) == 0 " -/deploy/rest/snapshot/{{UUID}}/,taking snapshot of the virtual machine which is part of the island,,"{ -""message"" : ""Snapshot of machine which is part of island is not allowed"" -}","def 
test_deploy_snapshot_machine_part_of_island(run_api, ideploy_deploy): +/ideploy/rest/list/,fetching the list of deployed islands by providing tag value,"{ +""scope"": ""all"" +}","{ + ""status"": 200, + ""response"": list of deployed islands +}","def test_ideploy_list_by_scope(run_api): """""" - Snapshot of the machine that is part of the island + filter by scope """""" - params, r = ideploy_deploy - deploy_id = r.json()[""deploy_UUID""] - machine_id = run_api.ideploy_details(UUID=deploy_id).json()[""machines""][0][""UUID""] - res = run_api.deploy_snapshot(deploy_id=machine_id) - assert res.json()[""error""] == ""Snapshot of machine which is part of island is not allowed"" + params = {""scope"": ""all""} + r = run_api.ideploy_list(params) + test_assert.status(r, 200) " -/deploy/rest/snapshot/{{UUID}}/,taking snapshot of the virtual machine with description,"{ -""description"" : ""description here"" -}","{ -""status"" : 201, -}","def test_deploy_snapshot_machine_with_description(run_api, deploy_image): +/ideploy/rest/list/,fetching the list of deployed islands when requested with invalid token,,"{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_ideploy_list_invalid_token(invalid_exec_api): """""" - Snapshot of the machine while giving description + Fetching the list of deployed islands by invalid token """""" - params, r = deploy_image - machine_id = r.json()[""UUID""] - description = ""Random"" - res = run_api.deploy_snapshot(deploy_id=machine_id, description=description) - snapshotted_machine_UUID = res.json()['snapshotted_machine_UUID'] - current_desp = run_api.library_details(UUID=snapshotted_machine_UUID, params={}).json()[""description""] - run_api.library_delete(snapshotted_machine_UUID, {}) - test_assert.status(res, 201) - assert current_desp == description, ""The error is %s"" % res.json() + idepl_list = invalid_exec_api.ideploy_list() + idepl_json = idepl_list.json() + test_assert.status(idepl_list, 401) + assert idepl_json[""detail""] == 
""Invalid token."" + " -/deploy/rest/snapshot/{{UUID}}/,taking snapshot of the virtual machine without Authorization,"{ -deploy_id = ""invalid"" -}","{ -""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -}","def test_deploy_snapshot_without_authorization(anonymous_exec_api): +/ideploy/rest/list/,fetching the filtered list of deployed islands. Check the user type before performing the operation.,,"{ + ""status"": 200, + ""response"": filtered list of deployed islands +}","def test_ideploy_list_filter(run_api): """""" - Snapshot of the machine without authorization + Fetching the list of deployed islands by adding filters """""" - deploy_id = ""invalid"" - depl_snapshot = anonymous_exec_api.deploy_snapshot(deploy_id, wait=False) - depl_json = depl_snapshot.json() - test_assert.status(depl_snapshot, 401) - assert depl_json[""detail""] == ""Authentication credentials were not provided."" + params, res, isl_res = [], [], [] + ideploy_count = 10 + arch = run_api.arch_type + prefix_name = f""filter_island_2_{rand_string()}_"" + isl_lib_name = [f""{prefix_name}{rand_string()}"" for _ in range(ideploy_count)] + networks = template_networks() + if arch == ""aarch64"": + params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + params2, r2 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + params3, r3 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + else: + params1, r1 = run_api.library_add_new_vm(networks=networks) + params2, r2 = run_api.library_add_new_vm(networks=networks) + params3, r3 = run_api.library_add_new_vm(networks=networks) + machine1 = { + ""uuid"": r1.json()[""uuid""], + ""nic_update_id"": r1.json()[""hw""][""networks""][0][""id""], + ""nic_delete_id"": r1.json()[""hw""][""networks""][2][""id""] + } + machine2 = { + ""uuid"": r2.json()[""uuid""], + ""nic_update_id"": 
r2.json()[""hw""][""networks""][1][""id""], + ""nic_delete_id"": r2.json()[""hw""][""networks""][0][""id""] + } + machine3 = { + ""uuid"": r3.json()[""uuid""], + ""nic_update_id"": r3.json()[""hw""][""networks""][2][""id""], + ""nic_delete_id"": r3.json()[""hw""][""networks""][1][""id""] + } + for i in range(ideploy_count): + param, r = run_api.ilibrary_add_new_island(machine1=machine1, machine2=machine2, + machine3=machine3, name=isl_lib_name[i]) + isl_uuid = r.json()[""uuid""] + params.append(param) + res.append(r) + isl_r = run_api.ideploy_deploy(isl_uuid) + isl_res.append(isl_r) + random_int = randint(0, 9) + name_filter = {""name"": params[random_int].get(""name"") + "" #1"", ""page_size"": ideploy_count} + uuid_filter = {""uuid"": isl_res[random_int].json().get(""deploy_uuid""), ""page_size"": ideploy_count} + owner_filter = {""owner"": ""colama"" if run_api.user_type == ""admin"" + else ""vivekt"" if run_api.user_type == ""non-admin"" + else ""manager"", ""search"": prefix_name, ""page_size"": ideploy_count} + exp_res = { + 0: [i.get(""name"") for i in params if i.get(""name"") + "" #1"" == name_filter.get(""name"")], + 1: [i.json().get(""deploy_uuid"") for i in isl_res if i.json().get(""deploy_uuid"") == uuid_filter.get(""uuid"")], + 2: [i.json().get(""owner"") for i in res], + } + filters = [name_filter, uuid_filter, owner_filter] + for filter in range(len(filters)): + r = run_api.ideploy_list(filters[filter]) + # check for valid response data with the filter parameters + if len(r.json().get(""results"")) != len(exp_res[filter]): + logging.error(f""error in filter: {filters[filter]}, the list of expected result for the filter is: {exp_res[filter]}, and the actual result is {r.json()}"") + assert False + test_assert.status(r, 200) + run_api.library_delete(r1.json()[""uuid""], params1) + run_api.library_delete(r2.json()[""uuid""], params2) + run_api.library_delete(r3.json()[""uuid""], params3) + for i in range(ideploy_count): + isl_rjson = isl_res[i].json() 
+ if 'error' not in isl_rjson.keys(): + uuid = isl_rjson[""deploy_uuid""] + run_api.ideploy_delete(uuid) + ilib_rjson = res[i].json() + if 'error' not in ilib_rjson.keys(): + uuid = ilib_rjson[""uuid""] + run_api.ilibrary_delete(uuid, params[i]) " -/deploy/rest/start/{{UUID}}/,starting deployment of machine by admin with a valid UUID and machine is in stopped state,,"{ -""status"" : 201, -""response"" : Machine should got to running state -}"," -endpoint = ""deploy_start"" - -PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""]}] -PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_MANAGER_RIGHTS}] -PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] - -@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) -def test_deploy_start_vm_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): +/ideploy/rest/pause/{UUID}/,snapshotting the deployed island machine,,"{ +""status"":201 +}","def test_ideploy_pause_self(ideploy_pause): """""" - Deploying a Image and Starting the VM and then Stopping by Admin + Pausing the Island """""" - # Admin check of Starting a deployment created by different user - deploy_id = custom_lib_non_admin_operations - r = run_api.deploy_start(deploy_id) + r = ideploy_pause test_assert.status(r, 201) - run_api.deploy_stop(deploy_id)" -/deploy/rest/start/{{UUID}}/,"starting deployment of machine by manager with a valid UUID and machine is in stopped state , where manager does not have rights over servers",,," -endpoint = ""deploy_start"" - -PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""]}] -PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_MANAGER_RIGHTS}] -PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": 
DEPL_STATE[""stopped""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] - - -@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) -def test_deploy_start_vm_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): - """""" - Deploying a Image and Starting the VM and then Stopping - """""" - # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server - deploy_id = custom_lib_admin_operations - r = run_api.deploy_start(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) - run_api.deploy_stop(deploy_id) - - # When the user is part of the group that the manager manages but the deployment is not on manager rightful server - deploy_id = custom_lib_non_admin_operations - r = run_api.deploy_start(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) - run_api.deploy_stop(deploy_id) " -/deploy/rest/start/{{UUID}}/,"starting deployment of machine by manager with a valid UUID and machine is in stopped state , where manager has rights over servers",,," -endpoint = ""deploy_start"" - -PARAMETERS = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""]}] -PARAMETERS_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_MANAGER_RIGHTS}] -PARAMETERS_NO_SRV_RIGHT = [{""dest_obj"": OBJ_DEPL, ""final_state"": DEPL_STATE[""stopped""], ""deploy_with"": SRV_NO_MANAGER_RIGHTS}] - -@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -def test_deploy_start_vm_manager_server_right(skip_if_not_manager, 
custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): - """""" - Deploying a Image and Starting the VM and then Stopping - """""" - # When the user is not part of the group that the manager manages - deploy_id = custom_lib_admin_operations - r = run_api.deploy_start(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) +/ideploy/rest/pause/{UUID}/,pausing the deployment of island machine when requested with invalid token,"{ +uid =""invalid"" +} - # When the user is part of the group that the manager manages and deployment is on manager rights to server - deploy_id = custom_lib_non_admin_operations - r = run_api.deploy_start(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) - run_api.deploy_stop(deploy_id) -" -/deploy/rest/start/{{UUID}}/,starting deployment of machine using invalid id for which no machine exists,"{ - deploy_id = ""invalid"" -}","{ -""status"" : 404, -""message"" : ""Machine matching query does not exist"" -}","def test_deploy_start_invalid_UUID(run_api): +","{ + ""status"" : 404, + ""message"" : ""Deployed island doesnt exist."" +}","def test_ideploy_pause_with_invalid_uuid(run_api): """""" - starting a nonexisting machine using UUID + test_ideploy_pause_without_authorization """""" - deploy_id = ""invalid"" - r = run_api.deploy_start(deploy_id, wait=False) + uid = ""invalid"" + r = run_api.ideploy_pause(uuid=uid) test_assert.status(r, 404) - rjson = r.json() - assert ""Machine matching query does not exist"" in rjson[""error""], rjson + assert r.json()[""error""] == ""Deployed Island Doesn't Exist"" " -/deploy/rest/start/{{UUID}}/,starting deployment of machine using Valid UUID which is already in a running state,,"{ -""status"" : 400, -""message"" : ""Cannot perform start operation on running state of a machine"" -}","def test_deploy_start_already_running_vm(deploy_start, run_api): 
+/ideploy/rest/pause/{UUID}/,pausing the deployment of a deployed island machine without authorization,"{ +uid =""invalid"" +}","{ + ""status"" : 401, + ""message"" : ""Authentication credentials were not provided."" +}","def test_ideploy_pause_without_authorization(anonymous_exec_api): """""" - starting a machine that is already running + test_ideploy_pause_without_authorization """""" - x, r = deploy_start - deploy_id = x[""UUID""] - r = run_api.deploy_start(deploy_id, wait=False) - test_assert.status(r, 400) - rjson = r.json()[""error""] - assert rjson == ""Cannot perform start operation on running state of a machine"" + uid = ""invalid"" + r = anonymous_exec_api.ideploy_pause(uuid=uid, wait=False) + test_assert.status(r, 401) + assert r.json()[""detail""] == ""Authentication credentials were not provided."" " -/deploy/rest/start/{{UUID}}/,starting deployment of machine without Authorization,"{ - deploy_id = ""invalid"" +/ideploy/rest/pause/{UUID}/,pausing the deployment of a deployed island machine when requested with invalid token,"{ +uid =""invalid"" }","{ ""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -}","def test_deploy_start_without_authorization(anonymous_exec_api): +""message"" : ""Invalid token"" +}","def test_ideploy_pause_with_invalid_token(invalid_exec_api): """""" - starting machine deployment without authorization + test_ideploy_pause_with_invalid_token """""" - deploy_id = ""invalid"" - depl_start = anonymous_exec_api.deploy_start(deploy_id, wait=False) - depl_json = depl_start.json() - test_assert.status(depl_start, 401) - assert depl_json[""detail""] == ""Authentication credentials were not provided."" + uid = ""Invalid"" + r = invalid_exec_api.ideploy_pause(uuid=uid, wait=False) + test_assert.status(r, 401) + assert r.json()[""detail""] == ""Invalid token."" " -/deploy/rest/start/{{UUID}}/,starting machine deployment on a machine which is in paused state,,"{ -""status"" : 400, -""message"" : 'Cannot perform 
start operation on paused state of a machine' -}","def test_deploy_start_paused_vm(deploy_start, run_api): +/ideploy/rest/pause/{UUID}/,Pause a Deployment which you are not an owner of but with Admin rights,,200 : job created,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_pause_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): """""" - starting a machine that is in paused state + Pausing the Island by Admin """""" - x, r = deploy_start - deploy_id = x[""UUID""] - run_api.deploy_pause(deploy_id, wait=True) - res = run_api.deploy_start(deploy_id) - test_assert.status(res, 400) - assert res.json()[""error""] == 'Cannot perform start operation on paused state of a machine' + # Admin check of Starting a deployment created by different user + deploy_id = custom_ilib_non_admin_operations + r = run_api.ideploy_pause(deploy_id) + test_assert.status(r, 201) " -/deploy/rest/start/{{UUID}}/,starting machine deployment when requested with invalid token,"{ - deploy_id = ""invalid"" -}","{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_deploy_start_invalid_token(invalid_exec_api): +/ideploy/rest/pause/{UUID}/,Pause a Deployment which you are not an owner of and without Admin rights,,401: UnAuthorized,"@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_pause_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): """""" - starting machine deployment using invalid token + Pausing the Island by non-admin """""" - deploy_id = ""invalid"" - depl_start = invalid_exec_api.deploy_start(deploy_id, wait=False) - depl_json = depl_start.json() - test_assert.status(depl_start, 401) - assert depl_json[""detail""] == ""Invalid token."" + # Non-admin check of Starting a deployment created by different user + deploy_id = custom_ilib_admin_operations + r = run_api.ideploy_pause(deploy_id) + test_assert.status(r, 403) " 
-/deploy/rest/stop/{{UUID}}/,stopping deployment of a machine when Invalid UUID is provided,"{ - deploy_id = ""invalid"" -}","{ -""status"" : 404, -""message"" : ""Machine matching query does not exist"" -}","def test_deploy_stop_invalid_UUID(run_api): +/ideploy/rest/pause/{UUID}/,island deployment for a machine from running state to paused state,,"{ +""response"": machine paused +}","def test_ideploy_check_from_running_to_paused(run_api, ideploy_start): """""" - stopping machine deployment using a machine id for which machine does not exist + island deploy from running state to paused state """""" - - deploy_id = ""invalid"" - r = run_api.deploy_stop(deploy_id, wait=False) - test_assert.status(r, 404) - rjson = r.json() - assert ""Machine matching query does not exist"" in rjson[""error""], rjson - + x, r = ideploy_start + deploy_id = x[""deploy_uuid""] + res = run_api.ideploy_details(uuid=deploy_id).json() + initial_state = res[""state""] + if not initial_state == ""running"": + assert False, ""The machine is not in running state, current state of machine is %s"" % initial_state + run_api.ideploy_pause(uuid=deploy_id) + result = run_api.ideploy_details(uuid=deploy_id).json() + paused_network_segments = result['island']['network_segments'] + for pause in paused_network_segments: + if pause['name'] not in (""Default Public Segment"", ""HostOnly Segment""): + assert pause['status'] == ""inactive"", ""json |> %s"" % pause + final_state = result[""state""] + assert final_state == ""paused"", ""The error is %s"" % result " -/deploy/rest/stop/{{UUID}}/,stopping deployment of a machine when requested with invalid token,"{ - deploy_id = ""invalid"" -}","{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_deploy_stop_invalid_token(invalid_exec_api): +/ideploy/rest/pause/{UUID}/,Check for the transition of state from Running to Paused,,Working as intended," +def test_ideploy_check_from_running_to_paused(run_api, ideploy_start): """""" - stopping machine 
deployment with invalid token + test_ideploy_check_from_running_to_paused """""" - deploy_id = ""invalid"" - depl_stop = invalid_exec_api.deploy_stop(deploy_id, wait=False) - depl_json = depl_stop.json() - test_assert.status(depl_stop, 401) - assert depl_json[""detail""] == ""Invalid token."" + x, r = ideploy_start + deploy_id = x[""deploy_uuid""] + res = run_api.ideploy_details(uuid=deploy_id).json() + initial_state = res[""state""] + if not initial_state == ""running"": + assert False, ""The machine is not in running state, current state of machine is %s"" % initial_state + run_api.ideploy_pause(uuid=deploy_id) + result = run_api.ideploy_details(uuid=deploy_id).json() + paused_network_segments = result['island']['network_segments'] + for pause in paused_network_segments: + if pause['name'] not in (""Default Public Segment"", ""HostOnly Segment""): + assert pause['status'] == ""inactive"", ""json |> %s"" % pause + final_state = result[""state""] + assert final_state == ""paused"", ""The error is %s"" % result " -/deploy/rest/stop/{{UUID}}/,stopping deployment of a machine when valid UUID is provided and machine is in running state,,"{ -""status"" : 201 -}","def test_deploy_stop_self(deploy_stop): +/ideploy/rest/resume/{UUID}/,successful island deployment for a machine from paused state to running state,,"{ +""response"": machine paused +}","def test_ideploy_resume_checking_state_paused_to_running(ideploy_start, run_api): """""" - stopping machine deployment + Check for the transition of state from Paused to Running """""" - x, r = deploy_stop - test_assert.status(r, 201)" -/deploy/rest/stop/{{UUID}}/,stopping deployment of a machine when valid UUID is provided and machine is in stopped state,,"{ -""status"" : 400, -""message"" : ""Cannot perform power off operation on stopped state of a machine"" -}","def test_deploy_stop_already_stopped_vm(run_api, deploy_stop): + res, r = ideploy_start + deploy_id = res[""deploy_uuid""] + run_api.ideploy_pause(deploy_id) + 
paused_r = run_api.ideploy_details(deploy_id) + paused_rjson = paused_r.json() + assert paused_rjson['state'] == 'paused', ""json |> %s"" % paused_rjson + run_api.ideploy_resume(deploy_id) + resume_r = run_api.ideploy_details(deploy_id) + resume_rjson = resume_r.json() + assert resume_rjson['state'] == 'running', ""json |> %s"" % resume_rjson +" +/ideploy/rest/resume/{UUID}/,starting the segment of the island by manager when he have rights over the server,,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_ideploy_segment_start_manager_server_right(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): """""" - stopping machine deployment when machine in stopped state + Starting the segment of the Island by manager when have right on server """""" - x, result = deploy_stop - deploy_id = x[""UUID""] - response = run_api.deploy_stop(deploy_id) - test_assert.status(response, 400) - rjson = response.json()[""error""] - assert rjson == ""Cannot perform power off operation on stopped state of a machine"" + # When the user is not part of the group that the manager manages + seg_id = custom_ilib_admin_operations + r = run_api.ideploy_segment_start(seg_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + run_api.ideploy_segment_stop(seg_id) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + seg_id = custom_ilib_non_admin_operations + r = run_api.ideploy_segment_start(seg_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) + run_api.ideploy_segment_stop(seg_id) " -/deploy/rest/stop/{{UUID}}/,stopping deployment of a machine without authorization,"{ - deploy_id = ""invalid"" -}","{ -""status"" : 401, -""message"" : 
""Authentication credentials were not provided"" -}","def test_deploy_stop_without_authorization(anonymous_exec_api): +/ideploy/rest/resume/{UUID}/,snapshotting the deployed island machine,,"{ +""status"":201 +}","def test_ideploy_resume(ideploy_resume): """""" - stopping machine deployment without authorization + When provided with valid uuid """""" - deploy_id = ""invalid"" - depl_stop = anonymous_exec_api.deploy_stop(deploy_id, wait=False) - depl_json = depl_stop.json() - test_assert.status(depl_stop, 401) - assert depl_json[""detail""] == ""Authentication credentials were not provided."" + r = ideploy_resume + test_assert.status(r, 201) " -/group/rest/add-server/{id}/,"addition of server to group when invalid group id is provided. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +/ideploy/rest/resume/{UUID}/,resuming the deployment of island machine when requested with invalid token,"{ +uid =""invalid"" +} + ","{ -group_id=0, -}","{ - ""status"": 400, - ""message"": ""Group does not exist"" -}","def test_group_server_add_with_invalid_id(run_api, server_list): + ""status"" : 404, + ""message"" : ""Deployed island doesnt exist."" +}","def test_ideploy_resume_invalid_uuid(run_api): """""" - Add Server in Group with invalid group id + When provided with invalid uuid """""" - r = server_list - res = r.json() - list_server = [result['UUID'] for result in res['results']] - server_group_list = { - ""servers_list"": list_server - } - r = run_api.group_add_server(server_group_list, group_id=0) - if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: - test_assert.status(r, 403) - elif run_api.user_type == USER_TYPE[""admin""]: - res = r.json() - assert res['error'] == ""Group does not exist"" - test_assert.status(r, 400) + uuid = ""invalid"" + r = run_api.ideploy_resume(uuid) + test_assert.status(r, 404) + rjson = r.json() + assert rjson[""error""] == ""Deployed Island Doesn't 
Exist"", 'The error is %s' % rjson[""error""] " -/group/rest/add-server/{id}/,addition of server to group when requested with invalid token,"server_group_list = { ""servers_list"": [""0""] }","{ -""status"" : 401, -""message"" : ""Invalid token"" -} -"," -def test_group_server_add_with_invalid_token(invalid_exec_api): +/ideploy/rest/resume/{UUID}/,resuming the deployment of a deployed island machine without authorization,"{ +uuid =""invalid"" +}","{ + ""status"" : 401, + ""message"" : ""Authentication credentials were not provided."" +}","def test_ideploy_resume_without_authorization(anonymous_exec_api): """""" - Add Server in Group with invalid token + When provided without authorization """""" - server_group_list = { - ""servers_list"": ['0'] - } - r = invalid_exec_api.group_add_server(server_group_list, group_id=0) - res = r.json() + uuid = ""invalid"" + r = anonymous_exec_api.ideploy_resume(uuid, wait=False) test_assert.status(r, 401) - assert res['detail'] == ""Invalid token."" + rjson = r.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""The error message is %s"" % rjson['detail'] + + " -/group/rest/add-server/{id}/,addition of server to group without Authorization ,"server_group_list = { - ""servers_list"": [""0""] - }","{ +/ideploy/rest/resume/{UUID}/,resuming the deployment of a deployed island machine when requested with invalid token,"{ +uuid =""invalid"" +}","{ ""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -} -","def test_group_server_add_without_authorization(anonymous_exec_api): +""message"" : ""Invalid token"" +}","def test_ideploy_resume_invalid_token(invalid_exec_api): """""" - Add Server in Group without Authorization + When provided with invalid token """""" - - server_group_list = { - ""servers_list"": [""0""] - } - r = anonymous_exec_api.group_add_server(server_group_list, group_id=0) - res = r.json() + uuid = ""invalid"" + r = invalid_exec_api.ideploy_resume(uuid, 
wait=False) test_assert.status(r, 401) - assert res['detail'] == ""Authentication credentials were not provided."" + rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""The error message is %s"" % rjson['detail'] " -/group/rest/add-server/{id}/,"addition of server when both group and server id are invalid. Check the user type before performing the operation, only admin user type have the permission to perform such operations. -","server_group_list = { - ""servers_list"": [""0""] - }","{ -""status"" : 400 -""message"" : ""Group does not exist"" -}","def test_group_server_add_invalid_server_id_and_grp_id(run_api): +/ideploy/rest/resume/{UUID}/,Resume a Deployment which you are not an owner of but with Admin rights,,201 : job created," +@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_resume_with_admin_rights(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): """""" - Add Server in Group with invalid server id and invalid group id + Resume a Deployment which you are not an owner of but with Admin rights """""" - server_group_list = { - ""servers_list"": [""0""] - } - r = run_api.group_add_server(server_group_list, group_id=0) - if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: - test_assert.status(r, 403) - elif run_api.user_type == USER_TYPE[""admin""]: - res = r.json() - assert res['error'] == ""Group does not exist"" - test_assert.status(r, 400) - + deploy_id = custom_ilib_non_admin_operations + r = run_api.ideploy_resume(deploy_id) + test_assert.status(r, 201) " -/group/rest/add-server/{id}/,"addition of server when both group and server id are valid. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
-",,"{ -status : 403 / 202 -}","PARAMETERS = [{""action"": GROUP_ADD_SERVER}] - -@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) -def test_group_server_add(run_api, custom_group_admin_operations): +/ideploy/rest/resume/{UUID}/,Resume a Deployment which you are not an owner of and without Admin rights,,403: UnAuthorized,"@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_resume_without_owner_and_admin(skip_if_admin, custom_ilib_admin_operations, run_api): """""" - Add Server in Group + Resume a Deployment which you are not an owner of and without Admin rights """""" - param, ret = custom_group_admin_operations - if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: - test_assert.status(ret, 403) - elif run_api.user_type == USER_TYPE[""admin""]: - test_assert.status(ret, 202) - + deploy_id = custom_ilib_admin_operations + r = run_api.ideploy_resume(deploy_id) + test_assert.status(r, 403) + rjson = r.json() + assert rjson['error'] == 'You do not have permission to perform this action.', ""json |> %s"" % rjson " -/group/rest/add-server/{id}/,"addition of server when server id is invalid. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
-","{ -server_group_list = { - ""servers_list"": [""0""] - } -}","{ -status : 400/404 -}","@pytest.mark.skip(reason=""Skipping this test because of it is returning 207 in place 400/404"") -@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) -def test_group_server_add_with_invalid_server_id(run_api, custom_group_admin_operations): +/ideploy/rest/resume/{UUID}/,island deployment for a machine from paused state to running state by a manager who does not have permissions over the server,,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_ideploy_resume_by_manager_without_server_right(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): """""" - Add Server in Group with invalid server id + Ideploy resume by manager without server right """""" - server_group_list = { - ""servers_list"": [""0""] - } - params, r = custom_group_admin_operations - res = r.json() - group_id = res['id'] + # When Manager manages the user but not the server + deploy_id = custom_ilib_non_admin_operations + r = run_api.ideploy_resume(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) - r = run_api.group_add_server(server_group_list, group_id) - if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: - test_assert.status(r, 403) - elif run_api.user_type == USER_TYPE[""admin""]: - status_code = r.status_code - assert status_code == 400 or status_code == 404 -" -/group/rest/add-user/{id}/,adding user id to group without Authorization ,"{group_id=0, -user_ids=[0] -}","{status"":401, -""message"":'Authentication credentials were not provided.' 
-}","def test_group_add_user_without_authorization(anonymous_exec_api): - """""" - Adding user id into group without Authorization - """""" - template, r = anonymous_exec_api.group_add_user(group_id=0, user_ids=[0]) - res = r.json() - test_assert.status(r, 401) - assert res['detail'] == 'Authentication credentials were not provided.' -" -/group/rest/add-user/{id}/,"providing invalid Group Id. Check the user type before performing the operation, only admin user type have the permission to perform such operations. -","{""group_id"" : 0}","{""status"":400, -""message"": ""'Group does not exist'"" -}"," -def test_group_add_user_with_invalid_group_id(run_api, admin_exec_api): + # when manager does not manage the user nor the server + deploy_id = custom_ilib_admin_operations + r = run_api.ideploy_resume(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) +" +/ideploy/rest/segment_start/,starting the segment of the island by manager when he does not have right over the server,,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_ideploy_segment_start_manager_no_server_right(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): """""" - Adding users into invalid group id + Starting the segment of the Island by manager when have no right on server """""" - user_result = admin_exec_api.user_list() - res = user_result.json() - user_ids = [result['id'] for result in res['results']] - template, r = run_api.group_add_user(group_id=0, user_ids=user_ids) - if run_api.user_type == USER_TYPE[""non_admin""]: - test_assert.status(r, 403) - elif run_api.user_type == USER_TYPE[""admin""]: - res = r.json() - assert res['error'] == 'Group does not exist' - test_assert.status(r, 400) -" -/group/rest/add-user/{id}/,"providing 
invalid User Id. Check the user type before performing the operation, only admin user type have the permission to perform such operations. -","{""user_id"" : 0}","{""status"" : 207 }","PARAMETERS = [{""action"": GROUP_ADD_USER}] + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + seg_id = custom_ilib_admin_operations + r = run_api.ideploy_segment_start(seg_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + run_api.ideploy_segment_stop(seg_id) -@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) -def test_group_add_user_with_invalid_user_id(run_api, custom_group_admin_operations): + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + seg_id = custom_ilib_non_admin_operations + r = run_api.ideploy_segment_start(seg_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) + run_api.ideploy_segment_stop(seg_id) +" +/ideploy/rest/segment_stop/,"stopping the segment of an island machine by a manager type of user, where the manager have rights over the servers",,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_ideploy_segment_stop_manager_server_right(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): """""" - Adding invalid user id into group + Stopping the segment of an Island by manager when have right on server """""" - params, r = custom_group_admin_operations - res = r.json() - group_id = res['id'] - template, r = run_api.group_add_user(group_id, user_ids=[0]) - if run_api.user_type == USER_TYPE[""non_admin""]: - test_assert.status(r, 403) - elif run_api.user_type == USER_TYPE[""admin""]: - 
test_assert.status(r, 207) + # When the user is not part of the group that the manager manages + seg_id = custom_ilib_admin_operations + r = run_api.ideploy_segment_stop(seg_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + seg_id = custom_ilib_non_admin_operations + r = run_api.ideploy_segment_stop(seg_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) " -/group/rest/add-user/{id}/,providing valid Group Id and User Id,"{ -""search"" :""vivekt"" -}","{ -""status"" : 202, -""response"" : Accepted","@pytest.mark.parametrize(""custom_group_admin_operations"", [{""action"": GROUP_ADD_MANAGER_AS_MANAGER}], indirect=True) -def test_group_add_user_by_manager(skip_if_not_manager, custom_group_admin_operations, run_api): +/ideploy/rest/segment_stop/,"stopping the segment of an island machine by a manager type of user, where the manager do not have rights over the servers",,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_ideploy_segment_stop_manager_no_server_right(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): """""" - When provided user_id and group_id + Stopping the segment of an Island by manager when have no right on server """""" - template, r = custom_group_admin_operations - group_id = template['group_id'] - user_r = run_api.user_list({'search': 'vivekt'}) - user_id = user_r.json()['results'][0]['id'] - params, result = run_api.group_add_user(group_id, user_ids=[user_id]) - test_assert.status(result, 202) + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + seg_id = 
custom_ilib_admin_operations + r = run_api.ideploy_segment_stop(seg_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + seg_id = custom_ilib_non_admin_operations + r = run_api.ideploy_segment_stop(seg_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) + + " -/group/rest/add-user/{id}/,requesting of user addition to group with invalid token,"{group_id=0, -user_ids=[0] -}","{""status"":401, -""message"": ""Invalid Token"" -}","def test_group_add_user_with_invalid_token(invalid_exec_api): +/ideploy/rest/segment_stop/,"stopping the segment of an island machine by a manager type of user, where the manager do not have rights over the servers",,"{ +""status"" : 201, +""response"": segment stopped +}","def test_ideploy_segment_stop_valid_uuid(run_api, ideploy_details): """""" - Adding user id into group with invalid token + Stopping the segment of the Island """""" - template, r = invalid_exec_api.group_add_user(group_id=0, user_ids=[0]) - res = r.json() - test_assert.status(r, 401) - assert res['detail'] == 'Invalid token.' + param, result = ideploy_details + seg_id = result.json()[""island""][""network_segments""][2][""uuid""] + r = run_api.ideploy_segment_start(seg_id) + r = run_api.ideploy_segment_stop(seg_id) + test_assert.status(r, 201) " -/group/rest/bulkdelete/,performing bulk delete without Authorization ,"{ -group_id_list = [0] -}","{ -""status"":401, -""message"":'Authentication credentials were not provided.' 
-}","def test_group_bulk_delete_without_authorization(anonymous_exec_api): +/ideploy/rest/segment_stop/,stopping the segment of an island by admin user,,"{ +""status"" : 201, +""response"": segment stopped +}","@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_segment_stop_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): """""" - delete group in bulk without Authorization + Stopping the segment of an Island by Admin """""" - r = anonymous_exec_api.group_bulk_delete(group_id_list=[0]) - res = r.json() - test_assert.status(r, 401) - assert res['detail'] == 'Authentication credentials were not provided.' + # Admin check of Stopping a deployment created by different user + seg_id = custom_ilib_non_admin_operations + r = run_api.ideploy_segment_stop(seg_id) + test_assert.status(r, 201) " -/group/rest/bulkdelete/,"providing invalid ids. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
-","{ -invalid_group_list = [0] -}","{""status"":400/403, -""message"":Bad Request}","@pytest.mark.skip(reason=""Skipping this test because it is returning 207 at the place of 400"") -def test_group_bulk_delete_with_invalid_id(run_api): +/ideploy/rest/segment_stop/,stopping the segment of an island by a non-admin user,,"{ +""status"" : 403 +}","@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_segment_stop_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): """""" - delete groups in bulk with invalid id + Stopping the segment of an Island by non-admin """""" - invalid_group_list = [0] - r = run_api.group_bulk_delete(invalid_group_list) - if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: - test_assert.status(r, 403) - elif run_api.user_type == USER_TYPE[""admin""]: - test_assert.status(r, 400) + # Non-admin check of Stopping a deployment createdan different user + seg_id = custom_ilib_admin_operations + r = run_api.ideploy_segment_stop(seg_id) + test_assert.status(r, 403) " -/group/rest/bulkdelete/,"providing valid group ids. Check the user type before performing the operation, only admin user type have the permission to perform such operations. -",,"{""status"": 202/403, -""message"":Accepted}","PARAMETERS = [{""action"": GROUP_BULK_DELETE}] +/ideploy/rest/segment_stop/,"stopping the segment of a island machine, where the segment is non-deployable. 
check the user type before performing the operation, only admin user type have the permission to perform such operations.",,"{ +""status"" : 400, +""message"" : ""No operation is allowed on the segment, as it is part of the library"""" -@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) -def test_group_bulk_delete(run_api, custom_group_admin_operations): +}","def test_ideploy_stop_non_deployment_segment(ilibrary_details, run_api): """""" - delete group in bulk + Stop a Segment which is part of Island (not Deployment) """""" - template, r = custom_group_admin_operations - if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: - test_assert.status(r, 403) - elif run_api.user_type == USER_TYPE[""admin""]: - test_assert.status(r, 202) + r = ilibrary_details + rjson = r.json() + uuid = rjson['network_segments'][2]['uuid'] + res = run_api.ideploy_segment_start(uuid) + res = run_api.ideploy_segment_stop(uuid) + if run_api.user_type == USER_TYPE['non_admin']: + test_assert.status(res, 403) + result = res.json() + assert result['error'] == ""You do not have permission to perform this action."" + else: + test_assert.status(res, 400) + result = res.json() + assert result['error'] == f""No operation is allowed on {rjson['network_segments'][2]['name']} , as it is part of the library"" + + " -/group/rest/bulkdelete/,requesting bulk deletion of groups with invalid token,"{ -group_id_list=[0] +/ideploy/rest/segment_stop/,stopping the segment of a island machine without authorization,"{ +seg_id = ""invalid"" }","{ -""status"":401, -""message"": ""Invalid Token"" -}","def test_group_bulk_delete_with_invalid_token(invalid_exec_api): +""status"" : 401, +""message"" : ""Authentication credentials were not provided."" + +}","def test_ideploy_segment_stop_without_authorization(anonymous_exec_api): """""" - delete group in bulk with invalid token + Stopping the segment of the Island without authorization """""" - r = 
invalid_exec_api.group_bulk_delete(group_id_list=[0]) - res = r.json() - test_assert.status(r, 401) - assert res['detail'] == 'Invalid token.' + seg_id = ""invalid"" + seg_stop = anonymous_exec_api.ideploy_segment_stop(seg_id, wait=False) + seg_json = seg_stop.json() + test_assert.status(seg_stop, 401) + assert seg_json[""detail""] == ""Authentication credentials were not provided."" " -/group/rest/delete/{id}/,"deleting group with valid group id. Check the user type before performing the operation, only admin user type have the permission to perform such operations. -",,"{ - ""status"": 204 -}","PARAMETERS = [{""action"": GROUP_DELETE}] +/ideploy/rest/segment_stop/,stopping the segment of a island machine using invalid token,"{ +seg_id = ""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token."" -@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) -def test_group_delete(run_api, custom_group_admin_operations): +}","def test_ideploy_segment_stop_invalid_token(invalid_exec_api): """""" - delete a group + Stopping the segment of the Island using invalid token """""" - template, r = custom_group_admin_operations - if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: - test_assert.status(r, 403) - elif run_api.user_type == USER_TYPE[""admin""]: - test_assert.status(r, 204) + seg_id = ""invalid"" + seg_stop = invalid_exec_api.ideploy_segment_stop(seg_id, wait=False) + seg_json = seg_stop.json() + test_assert.status(seg_stop, 401) + assert seg_json[""detail""] == ""Invalid token."" " -/group/rest/delete/{id}/,group deletion without authorization ,"{ -id=0 +/ideploy/rest/segment_stop/,stopping the segment of a island machine using invalid deployment uuid,"{ +seg_id = ""invalid"" }","{ - ""status"": 401, - ""message"" : 'Authentication credentials were not provided.' 
- -}"," -def test_group_delete_without_authorization(anonymous_exec_api): +""status"" : 404, +""response"": Failure +}","def test_ideploy_segment_stop_invalid_uuid(run_api): """""" - Delete group without authorization + Stopping the segment of the Island """""" - r = anonymous_exec_api.group_delete(id=0) - res = r.json() - test_assert.status(r, 401) - assert res['detail'] == 'Authentication credentials were not provided.' + seg_id = ""invalid"" + r = run_api.ideploy_segment_stop(seg_id) + test_assert.status(r, 404) +" +/ideploy/rest/segment_stop/,"stopping the segment of a island machine by changing the state of the machine from ""stopped"" to ""stopped"", where the island state and segment state is already in ""stopped"" state",,"{ +""response"" :operation successful +}","def test_ideploy_segment_stop_check_state_of_segments(ideploy_details, run_api): + """""" + Check for the transition of state from Stopped to Stopped (if Island state was Stopped and all Segments are Stopped) + """""" + param, result = ideploy_details + deploy_id = param[""deploy_uuid""] + + seg_id = result.json()[""island""][""network_segments""][2][""uuid""] + result = run_api.ideploy_segment_stop(seg_id) + result = run_api.ideploy_details(deploy_id) + rjson = result.json() + segments = [segment for segment in rjson[""island""][""network_segments""]][2:4] + + assert rjson['state'] == 'stopped', ""The error is %s"" % rjson + machines = rjson['machines'] + for machine in machines: + assert machine['state'] == 'stopped', ""The error is %s"" % (machine) + for segment in segments: + assert segment['status'] == 'inactive', ""The error is %s"" % (segment) " -/group/rest/delete/{id}/,"providing invalid id. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
-","{ -id=0 +/ideploy/rest/segment_stop/,"stopping the segment of a island machine by changing the state of the machine from ""running"" to ""mixed"", where the island is already in running state","{ + ""machine_list"": machine_uuids, + ""op"": ""poweroff"" }","{ -""status"": 400/403, -""message"": ""Group that matches the query does not exist"" -}","def test_group_delete_with_invalid_id(run_api): +""response"" :operation successful +}","def test_ideploy_segment_stop_check_state_running_to_mixed(run_api, ideploy_start): """""" - Delete a group with invalid id + Check for the transition of state from Running to Mixed (if Island state was Running) """""" - r = run_api.group_delete(id=0) - if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: - test_assert.status(r, 403) - elif run_api.user_type == USER_TYPE[""admin""]: - res = r.json() - assert res['error'] == 'Group matching query does not exist.' - test_assert.status(r, 400) + x, r = ideploy_start + deploy_id = x[""deploy_uuid""] + result = run_api.ideploy_details(deploy_id) + assert result.json()['state'] == ""running"", ""The error is %s"" % (result.json()['state']) + + machine_uuids = [mc[""uuid""] for mc in result.json()[""machines""]] + deploy_bulkops_params = { + ""machine_list"": machine_uuids, + ""op"": ""poweroff"" + } + run_api.deploy_bulkops(deploy_bulkops_params) + seg_ids = [segment[""uuid""] for segment in result.json()[""island""][""network_segments""]][2:4] + run_api.ideploy_segment_start(seg_ids[0]) + run_api.ideploy_segment_start(seg_ids[1]) + run_api.ideploy_segment_stop(seg_ids[0]) + r = run_api.ideploy_details(deploy_id) + rjson = r.json() + assert rjson['state'] == ""mixed"", ""The error is %s"" % (rjson) + + " -/group/rest/delete/{id}/,requesting group deletion with invalid token,"{ -id=0 -}","{ - ""status"": 401, - ""message"": ""Invalid Token"" -}","def test_group_delete_with_invalid_token(invalid_exec_api): +/ideploy/rest/segment/start/{UUID}/,starting the segment of 
deployed island machine,,"{ +""status"":201 +}","def test_ideploy_segment_start_self(ideploy_start): """""" - Delete group with invalid token + Start the Island """""" - r = invalid_exec_api.group_delete(id=0) - res = r.json() - test_assert.status(r, 401) - assert res['detail'] == 'Invalid token.' + x, r = ideploy_start + test_assert.status(r, 201) " -/group/rest/remove-server/{id}/,"removing server from group when both group and server id are invalid. Check the user type before performing the operation, only admin user type have the permission to perform such operations. -","{ -group_id = 0 +/ideploy/rest/segment/start/{UUID}/,starting the segment of a deployed island machine where the segment UUID does not exist,"{ +uid =""invalid"" }","{ -""status"" : 400, -""message"" : ""Group does not exist"" -}","def test_group_remove_server_invalid_server_id_and_grp_id(run_api): + ""status"" : 404, + ""message"" : ""Deployed island doesnt exist."" +}"," +def test_ideploy_segment_start_with_invalid_uuid(run_api): """""" - Remove server from group when both server id and group id is invalid + segment UUID does not exist """""" - r = run_api.group_remove_server(group_id=0, params={""servers_list"": [""0""]}) - if run_api.user_type == USER_TYPE[""non_admin""]: - test_assert.status(r, 403) - elif run_api.user_type == USER_TYPE[""admin""]: - res = r.json() - assert res['error'] == ""Group does not exist"" - test_assert.status(r, 400) + uid = ""invalid"" + r = run_api.ideploy_start(uuid=uid) + test_assert.status(r, 404) + assert r.json()[""error""] == ""Deployed Island Doesn't Exist"" " -/group/rest/remove-server/{id}/,"removing server from group when both group and server id are valid. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
-",,"{ -""status"" : 403 / 202 -}","PARAMETERS = [{""action"": GROUP_ADD_SERVER}] +/ideploy/rest/segment/start/{UUID}/,"starting island deployment for all segments of an island machine +","machine1 = { + ""uuid"": r1.json()[""uuid""], + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Default Public Segment"" + }, + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Seg1"" + } + ], + } + } +params = { + ""name"": ""Machine1"", + ""is_public"": True, + ""machines"": { + ""add"": [machine1], + }, + ""network_segments"": { + ""add"": [ + { + ""name"": ""Seg1"", + ""description"": ""string"", + ""enable_ipv4"": False + }, + { + ""name"": ""Seg2"", + ""description"": ""string"", + ""enable_ipv4"": False + }, + ] + } + }","{ +""response"": operation successful +}","def test_ideploy_deployment_starts_all_segment(run_api): + """""" + starting island deployment for all segments + """""" + networks = template_networks() + params1, r1 = run_api.library_add_new_vm(networks=networks) + machine1 = { + ""uuid"": r1.json()[""uuid""], + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Default Public Segment"" + }, + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Seg1"" + } + ], + } -@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) -def test_group_remove_server(skip_if_manager, run_api, custom_group_admin_operations): + } + params = { + ""name"": ""Machine1"", + ""is_public"": True, + ""machines"": { + ""add"": [machine1], + }, + ""network_segments"": { + ""add"": [ + { + ""name"": ""Seg1"", + ""description"": ""string"", + ""enable_ipv4"": False + }, + { + ""name"": ""Seg2"", + ""description"": ""string"", + ""enable_ipv4"": 
False + }, + ] + } + } + params, r = run_api.ilibrary_add_new_island(params=params) + island_uuid = r.json()[""uuid""] + res = run_api.ideploy_deploy(uuid=island_uuid) + deploy_uuid = res.json()[""deploy_uuid""] + run_api.ideploy_start(deploy_uuid) + r_details = run_api.ideploy_details(deploy_uuid) + result = r_details.json() + segment_list = result[""island""][""network_segments""] + for segment in segment_list: + if segment[""status""] != ""active"": + assert False, ""The error is %s"" % result + machine_uuids = [mc[""uuid""] for mc in r_details.json()[""machines""]] + deploy_bulkops_params = { + ""machine_list"": machine_uuids, + ""op"": ""poweroff"" + } + run_api.deploy_bulkops(deploy_bulkops_params) + run_api.ideploy_shutdown(deploy_uuid) + run_api.ideploy_delete(uuid=deploy_uuid) + run_api.ilibrary_delete(uuid=island_uuid) + run_api.library_delete(r1.json()[""uuid""]) +" +/ideploy/rest/segment/start/{UUID}/,starting island deployment an island machine from stopped state to running state,,"{ +""response"": operation successful +}","def test_ideploy_check_from_stopped_to_running(run_api, ideploy_start): """""" - Remove Server in Group + test_ideploy_check_from_stopped_to_running """""" - params, r = custom_group_admin_operations - group_id = params[""group_id""] - server_list = params[""server_list""] - r = run_api.group_remove_server(group_id, params={""servers_list"": server_list}) - if run_api.user_type == USER_TYPE[""non_admin""]: - test_assert.status(r, 403) - elif run_api.user_type == USER_TYPE[""admin""]: - test_assert.status(r, 202) + x, r = ideploy_start + deploy_id = x[""deploy_uuid""] + result = run_api.ideploy_details(uuid=deploy_id).json() + final_state = result[""state""] + assert final_state == ""running"", 'The error is %s' % result " -/group/rest/remove-server/{id}/,"removing server from group when invalid group id is provided. 
Check the user type before performing the operation, only admin user type have the permission to perform such operations. -","{ - ""servers_list"": [""0""], - ""group_id"" = 0 - }","{ -""status"" : 400 -""message"" : ""Group does not exist"" -}","def test_group_remove_server_invalid_group_id(run_api, server_list): +/ideploy/rest/segment/start/{UUID}/,starting a segment of a deployed island machine without Authorization,"{ +uid =""invalid"" +}","{ + ""status"" : 401, + ""message"" : ""Authentication credentials were not provided."" +}","def test_ideploy_segment_start_without_authorization(anonymous_exec_api): """""" - Remove server from group when group id is invalid + test_ideploy_without_authorization """""" - r = server_list - res = r.json() - list_server = [result['UUID'] for result in res['results']] - servers_list = { - ""servers_list"": list_server - } - r = run_api.group_remove_server(group_id=0, params={""servers_list"": servers_list}) - if run_api.user_type == USER_TYPE[""non_admin""]: - test_assert.status(r, 403) - elif run_api.user_type == USER_TYPE[""admin""]: - res = r.json() - assert res['error'] == ""Group does not exist"" - test_assert.status(r, 400) + uid = ""invalid"" + r = anonymous_exec_api.ideploy_start(uuid=uid, wait=False) + test_assert.status(r, 401) + assert r.json()[""detail""] == ""Authentication credentials were not provided."" " -/group/rest/remove-server/{id}/,removing server from group when requested with invalid token,"{ -group_id = 0 +/ideploy/rest/segment/start/{UUID}/,starting a segment of a deployed island machine when requested with invalid token,"{ +uid =""invalid"" }","{ ""status"" : 401, ""message"" : ""Invalid token"" -} -","def test_group_remove_server_with_invalid_token(invalid_exec_api): +}","def test_ideploy_segment_start_with_invalid_token(invalid_exec_api): """""" - Remove server from group with invalid token + test_ideploy_with_invalid_token """""" - r = invalid_exec_api.group_remove_server(group_id=0, 
params={""servers_list"": ['0']}) - res = r.json() + uid = ""invalid"" + r = invalid_exec_api.ideploy_start(uuid=uid, wait=False) test_assert.status(r, 401) - assert res['detail'] == ""Invalid token."" + assert r.json()[""detail""] == ""Invalid token."" " -/group/rest/remove-server/{id}/,"removing server from group when server id is invalid.Check the user type before performing the operation, only admin user type have the permission to perform such operations. -",,"{ -""status"" : 400/404 -}","@pytest.mark.skip(reason=""Skipping this test because of it is returning 207 in place 400/404"") -@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) -def test_group_remove_server_invalid_server_id(run_api, custom_group_admin_operations): +/ideploy/rest/snapshot/{UUID}/,taking snapshot of an island whenit is in running state,,"{ + ""status"" : 400, + ""message"" : ""Island snapshot is only allowed when all machines are in stopped state"" +}","def test_ideploy_snapshot_when_island_in_running_state(ideploy_start, run_api): """""" - Remove server from group when server id is invalid + taking snapshot when island is in running state """""" - params, r = custom_group_admin_operations - res = r.json() - group_id = res['id'] - r = run_api.group_remove_server(group_id, params={""servers_list"": [""0""]}) - if run_api.user_type == USER_TYPE[""non_admin""]: - test_assert.status(r, 403) - elif run_api.user_type == USER_TYPE[""admin""]: - status_code = r.status_code - assert status_code == 400 or status_code == 404 + x, r = ideploy_start + isl_id = x['deploy_uuid'] + r, rtask_details = run_api.ideploy_snapshot(uuid=isl_id) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""Island snapshot is only allowed when all machines are in stopped state"", ""|> Json %s"" % rjson " -/group/rest/remove-server/{id}/,removing server from group without Authorization ,"{ -group_id = 0 +/ideploy/rest/snapshot/{UUID}/,taking island of a deployed 
island machine without authorization,"{
+uid =""invalid""
 }","{
-""status"" : 401,
-""message"" : ""Authentication credentials were not provided""
-}
-","def test_group_remove_server_without_authorization(anonymous_exec_api):
+ ""status"" : 401,
+ ""message"" : ""Authentication credentials were not provided.""
+}","def test_ideploy_snapshot_without_authorization(anonymous_exec_api):
     """"""
-    Remove server from group without authorization
+    taking snapshot of island without authorization
     """"""
-    r = anonymous_exec_api.group_remove_server(group_id=0, params={""servers_list"": ['0']})
-    res = r.json()
+    uid = ""invalid""
+    r, r_details = anonymous_exec_api.ideploy_snapshot(uuid=uid, wait=False)
     test_assert.status(r, 401)
-    assert res['detail'] == ""Authentication credentials were not provided.""
+    assert r.json()[""detail""] == ""Authentication credentials were not provided.""
 "
-/group/rest/remove-user/{id}/,"deleting user from group when both group and user id are invalid. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+/ideploy/rest/snapshot/{UUID}/,snapshotting the deployment of island machine using invalid uuid,"{
+uid =""invalid""
+}
+ 
+","{
-group_id = 0,
-""users_list"": [0]
-}","{
-""status"" : 400,
-""message"" : ""Group does not exist""
-}","def test_group_remove_user_invalid_grp_and_user_id(run_api):
+ ""status"" : 404,
+ ""message"" : ""Deployed Island Doesn't Exist""
+}","def test_ideploy_snapshot_with_invalid_uuid(run_api):
     """"""
-    Remove user from group when invalid user id and group id are provided
+    snapshotting the deployment of island machine using invalid uuid
     """"""
-    r = run_api.group_remove_user(group_id=0, params={""users_list"": [0]})
-    if run_api.user_type == USER_TYPE[""non_admin""]:
-        test_assert.status(r, 403)
-    elif run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]:
-        res = r.json()
-        assert res['error'] == ""Group does not exist""
-        test_assert.status(r, 400)
+    uid = ""invalid""
+    r, r_details = run_api.ideploy_snapshot(uuid=uid, wait=False)
+    test_assert.status(r, 404)
+    assert r.json()[""error""] == ""Deployed Island Doesn't Exist""
 "
-/group/rest/remove-user/{id}/,"deleting user from group when both group and server id are valid. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
-","{ -group_id = 0 -}","{ -""status"" : 403 / 202 -}","@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) -def test_group_remove_user(skip_if_manager, run_api, custom_group_admin_operations): +/ideploy/rest/snapshot/{UUID}/,snapshotting the deployed island machine,,"{ +""status"":201 +}","def test_ideploy_snapshot_self(ideploy_snapshot): """""" - Remove User in Group + Snapshot the Island """""" - params, r = custom_group_admin_operations - group_id = params[""group_id""] - user_list = params[""users_list""] - r = run_api.group_remove_user(group_id, params={""users_list"": user_list}) - if run_api.user_type == USER_TYPE[""non_admin""]: - test_assert.status(r, 403) - elif run_api.user_type == USER_TYPE[""admin""]: - test_assert.status(r, 202) - + r = ideploy_snapshot + test_assert.status(r, 201) " -/group/rest/remove-user/{id}/,"deleting user from group when invalid group id is provided. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
-","{ -group_id = 0 -}","{ -""status"" : 400 / 403, -""message"" : ""Group does not exist"" -}","def test_group_remove_user_with_invalid_grp_id(run_api, admin_exec_api): +/ideploy/rest/snapshot/{UUID}/,Snapshotting a deployment creates revision in Island,,,"def test_ideploy_snapshot_creates_revision(run_api, ideploy_deploy): """""" - Remove user from group when invalid group id is provided + test_ideploy_snapshot_creates_revision """""" - user_result = admin_exec_api.user_list() - res = user_result.json() - user_ids = [result['id'] for result in res['results']] - r = run_api.group_remove_user(group_id=0, params={""users_list"": user_ids}) - if run_api.user_type == USER_TYPE[""non_admin""]: - test_assert.status(r, 403) - elif run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: - res = r.json() - assert res['error'] == ""Group does not exist"" - test_assert.status(r, 400) + params, r = ideploy_deploy + deploy_id = r.json()[""deploy_uuid""] + r, rtask_details = run_api.ideploy_snapshot(uuid=deploy_id) + snapshotted_island_uid = rtask_details[""result""][""snapshotted_island_uuid""] + revision_count = run_api.ilibrary_details(uuid=snapshotted_island_uid).json()[""revision""] + run_api.ilibrary_delete(uuid=snapshotted_island_uid) + assert revision_count != 1, ""Revision count should not be 1, the error is {}"".format(rtask_details) " -/group/rest/remove-user/{id}/,deleting user from group when requested with invalid token,"{ -group_id = 0, -""users_list"": [0] -}","{ -""status"" : 401, -""message"" : ""Invalid token"" -} -","def test_group_remove_user_with_invalid_token(invalid_exec_api): +/ideploy/rest/snapshot/{UUID}/,Snapshot a Deployment which you are not an owner of but with Admin rights,,"{ +""status"": 201 +}"," +@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_snapshot_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): """""" - Remove user from group with invalid token 
+ Snapshot the Island by Admin """""" - r = invalid_exec_api.group_remove_user(group_id=0, params={""users_list"": [0]}) - res = r.json() - test_assert.status(r, 401) - assert res['detail'] == ""Invalid token."" + # Admin check of Starting a deployment created by different user + deploy_id = custom_ilib_non_admin_operations + r, rtask_details = run_api.ideploy_snapshot(deploy_id) + test_assert.status(r, 201) + run_api.ideploy_delete(deploy_id) + run_api.ilibrary_delete( + rtask_details['result']['snapshotted_island_uuid'], {}) " -/group/rest/remove-user/{id}/,"deleting user from group when user id is invalid. Check the user type before performing the operation, only admin user type have the permission to perform such operations. -",,"{ -""status"" : 400 / 404 -}","PARAMETERS = [{""action"": GROUP_ADD}] - - -@pytest.mark.skip(reason=""Skipping this test because of it is returning 207 in place 400/404"") -@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) -def test_group_remove_user_with_invalid_user_id(run_api, custom_group_admin_operations): +/ideploy/rest/snapshot/{UUID}/,Snapshot a Deployment which you are not an owner of and without Admin rights,,"{ +""status"": 403 +}","@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_snapshot_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): """""" - Remove user from group when invalid user id is provided + Snapshot the Island by non-admin """""" - params, r = custom_group_admin_operations - res = r.json() - group_id = res['id'] - r = run_api.group_remove_user(group_id, params={""users_list"": [0]}) - if run_api.user_type == USER_TYPE[""non_admin""]: - test_assert.status(r, 403) - elif run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: - status_code = r.status_code - assert status_code == 400 or status_code == 404 + # Non-admin check of Starting a deployment created by different user + deploy_id = 
custom_ilib_admin_operations + r, rtask = run_api.ideploy_snapshot(deploy_id) + test_assert.status(r, 403) " -/group/rest/remove-user/{id}/,deleting user from group without Authorization ,"{ -group_id = 0, -""users_list"": [0] +/ideploy/rest/snapshot/{UUID}/,Check for the transition of state from Stopped to Snapshotting,,,"def test_snapshot_check_from_stop_to_snapshotting(run_api, ilibrary_add_new_island): + """""" + check state transition from stop to snapshotting + """""" + params, r = ilibrary_add_new_island + island_uuid = r.json()[""uuid""] + r = run_api.ideploy_deploy(island_uuid) + deploy_id = r.json()[""deploy_uuid""] + r, current_state = run_api.deployment_snapshot_state_details(id=deploy_id) + assert current_state == ""snapshotting"", ""Current state is in {}"".format(current_state) + temp = wait_to_complete(run_api, r.json()) + snapshotted_island_uid = temp[""result""][""snapshotted_island_uuid""] + run_api.ideploy_delete(uuid=deploy_id) + run_api.ilibrary_delete(uuid=snapshotted_island_uid) +" +/ideploy/rest/snapshot/{UUID}/,add description in param,"{ + ""description"" : ""This is Test description"" }","{ -""status"" : 401, -""message"" : ""Authentication credentials were not provided."" +""status"": 201 +}","def test_ideploy_snapshot_provided_description(ilibrary_add_new_island, run_api): + """""" + provide description when taking snaphot + """""" + params, r = ilibrary_add_new_island + island_uuid = r.json()[""uuid""] + r = run_api.ideploy_deploy(island_uuid) + deploy_id = r.json()['deploy_uuid'] + description = ""This is Test description"" + r, rtask_details = run_api.ideploy_snapshot(uuid=deploy_id, description=description) + snapshot_id = rtask_details[""result""][""snapshotted_island_uuid""] + test_assert.status(r, 201) + isl_details = run_api.ilibrary_details(snapshot_id).json() + assert isl_details['description'] == description, ""|> Json %s"" % isl_details + run_api.ideploy_delete(uuid=deploy_id) + run_api.ilibrary_delete(uuid=snapshot_id) +" 
+/ideploy/rest/start/{UUID}/,starting the deployment of island machine when requested with invalid token,"{ +uid =""invalid"" } -","def test_group_remove_user_without_authorization(anonymous_exec_api): + +","{ + ""status"" : 404, + ""message"" : ""Deployed island doesnt exist."" +}","def test_ideploy_with_invalid_uuid(run_api): """""" - Remove user from group without authorization + starting the deployment of island machine when requested with invalid token """""" - r = anonymous_exec_api.group_remove_user(group_id=0, params={""users_list"": [0]}) - res = r.json() - test_assert.status(r, 401) - assert res['detail'] == ""Authentication credentials were not provided."" + uid = ""invalid"" + r = run_api.ideploy_start(uuid=uid) + test_assert.status(r, 404) + assert r.json()[""error""] == ""Deployed Island Doesn't Exist"" " -/ideploy/rest/delete/{UUID}/,Delete a Private Island which you are not an owner of and not as admin,,401: UnAuthorized,"@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) -def test_deploy_delete_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): +/ideploy/rest/start/{UUID}/,starting the deployment of island machine,,"{ +""status"":201 +}","def test_ideploy_start_self(ideploy_start): + """""" + Start the Island + """""" + x, r = ideploy_start + test_assert.status(r, 201) +" +/ideploy/rest/start/{UUID}/,"starting island machine by a manager , when the manager have right over the servers",,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_ideploy_start_manager_server_right(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): """""" - Deleting the Deployed Island image by non-Admin + Start the Island by manager when have right on server """""" - # Non-admin check for Deleting the Deployed Island 
image created by different user. + # When the user is not part of the group that the manager manages deploy_id = custom_ilib_admin_operations - r = run_api.ideploy_delete(deploy_id) - test_assert.status(r, 403) + r = run_api.ideploy_start(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + run_api.ideploy_stop(deploy_id) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_ilib_non_admin_operations + r = run_api.ideploy_start(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) + run_api.ideploy_stop(deploy_id) " -/ideploy/rest/delete/{UUID}/,Delete a Private Island which you are not an owner of but as admin,,200: deleted,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) -def test_ideploy_delete_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): +/ideploy/rest/start/{UUID}/,"starting island machine by a manager , when the manager do not right over the servers",,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_ideploy_start_manager_no_server_right(skip_if_not_manager, + custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): """""" - Deleting the Deployed Island image by Admin + Start the Island by manager when have no right on server """""" - # Admin check for Deleting the Deployed Island image created by different user. 
+ # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = custom_ilib_admin_operations + r = run_api.ideploy_start(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + run_api.ideploy_stop(deploy_id) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server deploy_id = custom_ilib_non_admin_operations - r = run_api.ideploy_delete(deploy_id) - test_assert.status(r, 201) + r = run_api.ideploy_start(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) + run_api.ideploy_stop(deploy_id) " -/ideploy/rest/deploy/{UUID}/,"deploy ""arch"":""aarch64"" type island and provide server which does not support it",,400: Bad request,"def test_ideploy_island_with_aarch64(run_api, server_list_arm): +/ideploy/rest/start/{UUID}/,Starting a deployment starts all Segments inside of it,,,"def test_ideploy_deployment_starts_all_segment(run_api): """""" - deploy ""arch"":""aarch64"" type island but server does not support it + test_ideploy_deployment_starts_all_segment """""" - params, r = run_api.library_add_new_vm(arch='aarch64') - rjson_lib = r.json() + networks = template_networks() + params1, r1 = run_api.library_add_new_vm(networks=networks) + machine1 = { + ""uuid"": r1.json()[""uuid""], + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Default Public Segment"" + }, + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Seg1"" + } + ], + } - machine = { - ""uuid"": rjson_lib[""uuid""], - ""nic_update_id"": rjson_lib[""hw""][""networks""][0][""id""], - ""nic_delete_id"": rjson_lib[""hw""][""networks""][2][""id""] } - island_params = 
template_add_ilibrary_one_machine(machine=machine) - params, r_isl = run_api.ilibrary_add_new_island(params=island_params) - uuid = r_isl.json()[""uuid""] - server_list = server_list_arm - deploy_on = server_list if server_list else list(run_api.clm_my_servers.keys()) - r = run_api.ideploy_deploy(uuid, deploy_on=deploy_on, name=""test_island"") - if server_list: - test_assert.status(r, 200) - rjson = r.json() - deploy_id = rjson[""deploy_uuid""] - run_api.ideploy_delete(deploy_id) - else: - test_assert.status(r, 400) - rjson = r.json() - assert rjson['error'] == ""Either, Architecture of the selected Servers doesn't support 'aarch64' or the hvm_type of 'kvm' isn't supported"", ""|> Json %s"" % rjson - run_api.ilibrary_delete(uuid) - run_api.library_delete(rjson_lib[""uuid""]) + params = { + ""name"": ""Machine1"", + ""is_public"": True, + ""machines"": { + ""add"": [machine1], + }, + ""network_segments"": { + ""add"": [ + { + ""name"": ""Seg1"", + ""description"": ""string"", + ""enable_ipv4"": False + }, + { + ""name"": ""Seg2"", + ""description"": ""string"", + ""enable_ipv4"": False + }, + ] + } + } + params, r = run_api.ilibrary_add_new_island(params=params) + island_uuid = r.json()[""uuid""] + res = run_api.ideploy_deploy(uuid=island_uuid) + deploy_uuid = res.json()[""deploy_uuid""] + run_api.ideploy_start(deploy_uuid) + r_details = run_api.ideploy_details(deploy_uuid) + result = r_details.json() + segment_list = result[""island""][""network_segments""] + for segment in segment_list: + if segment[""status""] != ""active"": + assert False, ""The error is %s"" % result + machine_uuids = [mc[""uuid""] for mc in r_details.json()[""machines""]] + deploy_bulkops_params = { + ""machine_list"": machine_uuids, + ""op"": ""poweroff"" + } + run_api.deploy_bulkops(deploy_bulkops_params) + run_api.ideploy_shutdown(deploy_uuid) + run_api.ideploy_delete(uuid=deploy_uuid) + run_api.ilibrary_delete(uuid=island_uuid) + run_api.library_delete(r1.json()[""uuid""]) + " 
-/ideploy/rest/deploy/{UUID}/,Deploy a Private Island which you are not an owner of and not as admin,,401: UnAuthorized,"@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) -def test_ideploy_deploy_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): +/ideploy/rest/start/{UUID}/,starting a deployed island machine without authorization,"{ +uid =""invalid"" +}","{ + ""status"" : 401, + ""message"" : ""Authentication credentials were not provided."" +}","def test_ideploy_start_without_authorization(anonymous_exec_api): """""" - Deploying an Island Image by Non-admin + test_ideploy_without_authorization + """""" + uid = ""invalid"" + r = anonymous_exec_api.ideploy_start(uuid=uid, wait=False) + test_assert.status(r, 401) + assert r.json()[""detail""] == ""Authentication credentials were not provided."" +" +/ideploy/rest/start/{UUID}/,Start a Deployment which you are not an owner of but with Admin rights,,"{ +""status"": 201 +}","@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_start_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): + """""" + Start the Island by Admin + """""" + # Admin check of Starting a deployment created by different user + deploy_id = custom_ilib_non_admin_operations + r = run_api.ideploy_start(deploy_id) + test_assert.status(r, 201) + run_api.ideploy_stop(deploy_id) +" +/ideploy/rest/start/{UUID}/,Start a Deployment which you are not an owner of and without Admin rights,,"{ +""status"": 403 +}","@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_start_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): + """""" + Start the Island by non-admin """""" # Non-admin check of Starting a deployment created by different user - lib_id = custom_ilib_admin_operations - r = run_api.ideploy_deploy(lib_id) + deploy_id = custom_ilib_admin_operations + r = 
run_api.ideploy_start(deploy_id) test_assert.status(r, 403) + run_api.ideploy_stop(deploy_id) " -/ideploy/rest/deploy/{UUID}/,Deploy a Public Island with Admin rights but not owner,,200: deployed,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) -def test_ideploy_deploy_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): +/ideploy/rest/start/{UUID}/,Check for the transition of state from Stopped to Running,,Working as intended,"def test_ideploy_check_from_stopped_to_running(run_api, ideploy_start): """""" - Deploying an Island Image by Admin + state transition from stopped to running """""" - # Admin check of Starting a deployment created by different user - lib_id = custom_ilib_non_admin_operations - r = run_api.ideploy_deploy(lib_id) - x = r.json() + x, r = ideploy_start deploy_id = x[""deploy_uuid""] - test_assert.status(r, 200) - run_api.ideploy_delete(deploy_id) + result = run_api.ideploy_details(uuid=deploy_id).json() + final_state = result[""state""] + assert final_state == ""running"", 'The error is %s' % result " -/ideploy/rest/deploy/{UUID}/,Deploy with a name,,200: Returns job_UUID," -def test_ideploy_deploy_with_name(run_api, ilibrary_add_new_island): - params, r = ilibrary_add_new_island - uuid = r.json()[""uuid""] - r = run_api.ideploy_deploy(uuid, name=""test_island"") - x = r.json() - deploy_id = x[""deploy_uuid""] - r = run_api.ideploy_details(deploy_id) - rjson = r.json() - assert ""test_island"" in rjson[""island""][""name""], rjson - r = run_api.ideploy_delete(deploy_id) - test_assert.status(r, 201) +/ideploy/rest/stop/{UUID}/,stopping the deployment of island machine using valid existing uuid,,"{ +""status"":201 +}","def test_ideploy_stop(ideploy_start, run_api): + """""" + When provided with valid uuid + """""" + params, r = ideploy_start + deploy_id = params['deploy_uuid'] + res = run_api.ideploy_stop(deploy_id) + test_assert.status(res, 201) " -/ideploy/rest/deploy/{UUID}/,provide 
tags in param,,200: Deployed with tags,"def test_ideploy_deploy_with_tags(ilibrary_add_new_island, run_api): +/ideploy/rest/stop/{UUID}/,stopping the deployment of island machine using invalid uuid,"{ +deploy_id =""invalid"" +} + +","{ + ""status"" : 404, + ""message"" : ""Deployed island doesnt exist."" +}","def test_ideploy_stop_invalid_uuid(run_api): """""" - provide tags in params + When Island Deployment uuid doesnot exist """""" - params, r = ilibrary_add_new_island - uuid = r.json()[""uuid""] - tag_name = ""custom_tags"" - params = { - ""tag_list"": [tag_name] - } - r = run_api.ideploy_deploy(uuid, **params) + deploy_id = ""invalid"" + r = run_api.ideploy_stop(deploy_id, error=True) + test_assert.status(r, 404) rjson = r.json() - test_assert.status(r, 200) - isl_details = run_api.ideploy_details(rjson['deploy_uuid']).json() - all_tags = [tag['value'] for tag in isl_details['tags']] - assert tag_name in all_tags, ""|> Json %s"" % rjson - run_api.ideploy_delete(rjson['deploy_uuid']) + assert rjson['error'] == ""Deployed Island Doesn't Exist"", ""The error meassage is %s"" % rjson " -/ideploy/rest/deploy/{UUID}/,Select a group for deployment,,200: deployed,"def test_ideploy_deploy_select_group(run_api, ilibrary_add_new_island): - params, r = ilibrary_add_new_island - uuid = r.json()[""uuid""] - grp_list = list(run_api.clm_my_groups.keys()) - r = run_api.ideploy_deploy(uuid, group_list=grp_list) - x = r.json() - deploy_id = x[""deploy_uuid""] - r = run_api.ideploy_delete(deploy_id) - test_assert.status(r, 201) +/ideploy/rest/stop/{UUID}/,stopping the deployment of deployed island machine when requested with invalid token,"{ +deploy_id =""invalid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_ideploy_stop_with_invalid_token(invalid_exec_api): + + deploy_id = ""invalid"" + r = invalid_exec_api.ideploy_stop(deploy_id, error=True) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""The error 
is message %s"" % rjson + " -/ideploy/rest/deploy/{UUID}/,Select a server for deployment,,200: deployed,"def test_ideploy_deploy_select_server(run_api, ilibrary_add_new_island): - params, r = ilibrary_add_new_island - uuid = r.json()[""uuid""] - deploy_on = list(run_api.clm_my_servers.keys()) - r = run_api.ideploy_deploy(uuid, deploy_on) - x = r.json() - deploy_id = x[""deploy_uuid""] - r = run_api.ideploy_delete(deploy_id) - test_assert.status(r, 201) +/ideploy/rest/stop/{UUID}/,"stopping island machine by a manager , when the manager have right over the servers",,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_ideploy_stop_by_manager_with_server_right(skip_if_not_manager, custom_ilib_non_admin_operations, custom_ilib_admin_operations, run_api): + """""" + Ideploy stop by manager with server right + """""" + # when the manager manages the user and server + deploy_id = custom_ilib_non_admin_operations + r = run_api.ideploy_stop(deploy_id, error=True) + print(r.status_code) + test_assert.status(r, manager_rights_response(endpoint, manages_server=True, manages_user=True)) + + # when the manager manages the server but does not manages user + deploy_id = custom_ilib_admin_operations + r = run_api.ideploy_stop(deploy_id, error=True) + test_assert.status(r, manager_rights_response(endpoint, manages_server=True, manages_user=False)) " -/ideploy/rest/deploy/{UUID}/,successful deployment of an island,,,"def test_ideploy_deploy_self(ideploy_deploy): +/ideploy/rest/stop/{UUID}/,"stopping island machine by a manager , when the manager do not have right over the servers",,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def 
test_ideploy_stop_by_manager_without_server_right(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): """""" - Deploy Island image + Ideploy stop by manager without server right """""" - template, r = ideploy_deploy - test_assert.status(r, 200) + # When Manager manages the user but not the server + deploy_id = custom_ilib_non_admin_operations + r = run_api.ideploy_stop(deploy_id, error=True) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) + # when manager does not manage the user nor the server + deploy_id = custom_ilib_admin_operations + r = run_api.ideploy_stop(deploy_id, error=True) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) " -/ideploy/rest/deploy/{UUID}/,"deploying a island machine using valid existing uuid and providing name param, where the name contains slash ","{ -name : ""test/island"" -}","{ - ""status"" : 404, - ""message"" : ""Name cannot contain `/`"" -}","def test_ideploy_deploy_name_contains_slash(ilibrary_add_new_island, run_api): +/ideploy/rest/stop/{UUID}/,Stopping a deployment Stops all Segments inside of it,,,"def test_ideploy_stop_checking_state_of_segments(ideploy_start, run_api): """""" - name contains '/' + Stopping a deployment Stops all Segments inside of it """""" - params, r = ilibrary_add_new_island - uuid = r.json()[""uuid""] - r = run_api.ideploy_deploy(uuid, name=""test/island"") - rjson = r.json() - test_assert.status(r, 400) - assert rjson['error'] == ""Name cannot contain `/`"", ""|> Json %s"" % rjson + res, r = ideploy_start + deploy_id = res[""deploy_uuid""] + run_api.ideploy_stop(deploy_id) + run_api.ideploy_shutdown(deploy_id) + stop_r = run_api.ideploy_details(deploy_id) + stop_rjson = stop_r.json() + stop_network_segments = stop_rjson['island']['network_segments'] + for stop in stop_network_segments: + if stop['name'] not in (""Default Public Segment"", ""HostOnly 
Segment""): + assert stop['status'] == ""inactive"", ""json |> %s"" % stop " -/ideploy/rest/deploy/{UUID}/,deploying a island machine when requested with invalid token,"{ +/ideploy/rest/stop/{UUID}/,stopping a deployed island machine without authorization,"{ deploy_id =""invalid"" }","{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_ideploy_deploy_invalid_token(invalid_exec_api): + ""status"" : 401, + ""message"" : ""Authentication credentials were not provided."" +}","def test_ideploy_stop_without_authorizaton(anonymous_exec_api): """""" - deploying a island machine when requested with invalid token + without authorization """""" deploy_id = ""invalid"" - idepl_deploy = invalid_exec_api.ideploy_deploy(deploy_id, wait=False) - idepl_json = idepl_deploy.json() - test_assert.status(idepl_deploy, 401) - assert idepl_json[""detail""] == ""Invalid token."" + r = anonymous_exec_api.ideploy_stop(deploy_id, error=True) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""The error is message %s"" % rjson + + " -/ideploy/rest/deploy/{UUID}/,deploying an island using invalid uuid,"{ -deploy_id=""invalid"" -}","{ -""status"":404, -""message"" : ""failure"" -}","def test_ideploy_deploy_invalid_uuid(run_api): +/ideploy/rest/stop/{UUID}/,Stop a Deployment which you are not an owner of but with Admin rights,,"{ +""status"": 201, +}","@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_stop_with_admin_rights(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): """""" - deploy with invalid uuid + stop a Deployment which you are not an owner of and with Admin rights """""" - deploy_id = ""invalid"" - r = run_api.ideploy_deploy(deploy_id) - test_assert.status(r, 404) - res = r.json() - assert res[""result""] == 'FAILURE', res - assert 'does not exist' in res[""error""], res" -/ideploy/rest/deploy/{UUID}/,deploying an island 
without authorization,"{ -deploy_id=""invalid"" + deploy_id = custom_ilib_non_admin_operations + r = run_api.ideploy_stop(deploy_id) + test_assert.status(r, 201) +" +/ideploy/rest/stop/{UUID}/,Stop a Deployment which you are not an owner of and without Admin rights,,"{ +""status"": 403, +""message"" : 'You do not have permission to perform this action.' +}","@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_stop_without_owner_and_admin_rights(skip_if_admin, custom_ilib_admin_operations, run_api): + """""" + Stop a Deployment which you are not an owner of and without Admin rights + """""" + deploy_id = custom_ilib_admin_operations + r = run_api.ideploy_stop(deploy_id, error=True) + test_assert.status(r, 403) + rjson = r.json() + assert rjson['error'] == 'You do not have permission to perform this action.', ""The error message is %s"" % rjson +" +/ideploy/rest/stop/{UUID}/,Check for the transition of state from Running to Stopped,,,"def test_ideploy_stop_checking_state_running_to_stop(ideploy_start, run_api): + """""" + Check for the transition of state from Running to Stopped + """""" + res, r = ideploy_start + deploy_id = res[""deploy_uuid""] + running_r = run_api.ideploy_details(deploy_id) + running_rjson = running_r.json() + assert running_rjson['state'] == 'running', ""json |> %s"" % running_rjson + run_api.ideploy_stop(deploy_id) + run_api.ideploy_shutdown(deploy_id) + stop_r = run_api.ideploy_details(deploy_id) + stop_rjson = stop_r.json() + assert stop_rjson['state'] == 'stopped', ""json |> %s"" % stop_rjson +" +/ilibrary/rest/add/,creating an island library and adding it when user is unauthorized,"{ + ""name"": ""test_ilibrary_add_required_params"", + ""is_public"": True }","{ -""status"":401, +""status"" : 401, ""message"" : ""Authentication credentials were not provided."" -}","def test_ideploy_deploy_without_authorization(anonymous_exec_api): +}","def 
test_ilibrary_add_without_authorization(anonymous_exec_api): """""" - deploy an island without authorization + Creating an Island Library without authorization """""" - deploy_id = ""invalid"" - idepl_deploy = anonymous_exec_api.ideploy_deploy(deploy_id, wait=False) - idepl_json = idepl_deploy.json() - test_assert.status(idepl_deploy, 401) - assert idepl_json[""detail""] == ""Authentication credentials were not provided."" + params = {} + params, r = anonymous_exec_api.ilibrary_add_new_island(params=params) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == 'Authentication credentials were not provided.' " -/ideploy/rest/deploy/{UUID}/,deploying an island by manager when the manager has the required permissions for deployment,,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -def test_ideploy_deploy_manager_server_right(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): +/ilibrary/rest/add/,creating an island library and adding it when the segment name for NIC segment is different from what is to being added for this Island,"{ + ""name"": ""test_ilibrary_add_machine_with_other_nic"", + ""is_public"": True, + ""machines"": { + ""add"": [ + { + 'name': ""machine"", + ""uuid"": rjson[""uuid""], + ""nics"": { + ""add"": [ + { + ""model"": ""virtio"", + ""segment"": ""Other_segment"" + } + ] + } + } + ] + } + }","{ +""status"" : 400, +""message"" : ""Provided name of Segment isn't part of this Island"" +}","def test_ilibrary_add_machine_other_nic(run_api, library_add_new_vm): """""" - Deploying an Island Image by manager when have right on server + Add segment name for NIC segment as different from what is to being added for this Island """""" - # When the user is not part of the group that the manager manages - deploy_id = custom_ilib_admin_operations - r = 
run_api.ideploy_deploy(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + params, rjson = library_add_new_vm + params = { + ""name"": ""test_ilibrary_add_machine_with_other_nic"", + ""is_public"": True, + ""machines"": { + ""add"": [ + { + 'name': ""machine"", + ""uuid"": rjson[""uuid""], + ""nics"": { + ""add"": [ + { + ""model"": ""virtio"", + ""segment"": ""Other_segment"" + } + ] + } + } + ] + } + } + params, r = run_api.ilibrary_add_new_island(params=params) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""Provided name [Other_segment] of Segment isn't part of this Island"" - # When the user is part of the group that the manager manages and deployment is on manager rights to server - lib_id = custom_ilib_non_admin_operations - r = run_api.ideploy_deploy(lib_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) " -/ideploy/rest/deploy/{UUID}/,deploying an island by manager when the manager do not have the required permissions for deployment,,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) -@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) -def test_ideploy_deploy_manager_no_server_right(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): +/ilibrary/rest/add/,creating an island library and adding it when start_ip has value greater than that of end_ip,"{ + ""name"": ""test_ilibrary_add_bigger_start_ip"", + ""is_public"": True, + ""network_segments"": { + ""add"": [ + { + ""name"": ""test_segment"", + ""enable_ipv4"": True, + ""bridge_ip"": ""192.168.98.0"", + ""network_subnet"": ""255.255.255.0"", + ""enable_dhcp"": True, + ""start_ip"": ""192.168.98.150"", + ""end_ip"": ""192.168.98.1"" + } + ] + } + }","{ +""status"" : 400, +""message"" : ""end_ip must be higher than start_ip"" 
+}","def test_ilibrary_add_bigger_start_ip(run_api): + """""" + Creating an Island Library where start ip is bigger than end ip + """""" + params = { + ""name"": ""test_ilibrary_add_bigger_start_ip"", + ""is_public"": True, + ""network_segments"": { + ""add"": [ + { + ""name"": ""test_segment"", + ""enable_ipv4"": True, + ""bridge_ip"": ""192.168.98.0"", + ""network_subnet"": ""255.255.255.0"", + ""enable_dhcp"": True, + ""start_ip"": ""192.168.98.150"", + ""end_ip"": ""192.168.98.1"" + } + ] + } + } + params, r = run_api.ilibrary_add_new_island(params=params) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == 'end_ip must be higher than start_ip' +" +/ilibrary/rest/add/,creating an island library and adding it when segment with `Default-Public-Segment` name,"{ + ""name"": ""test_ilibrary_add_bigger_start_ip"", + ""is_public"": True, + ""network_segments"": { + ""add"": [ + { + 'name': 'Default-Public-Segment', + ""enable_ipv4"": True, + ""bridge_ip"": ""192.168.98.0"", + ""network_subnet"": ""255.255.255.0"", + ""enable_dhcp"": True, + ""start_ip"": ""192.168.98.1"", + ""end_ip"": ""192.168.98.150"" + } + ] + } + }","{ +""status"" : 400, +""message"" : ""NetworkSegment name cannot contain any whitespace nor any special characters other than '_' or '-'"" +}","def test_ilibrary_add_default_segmennt_name(run_api): """""" - Deploying an Island Image by manager when have no server right + Creating an Island Library with segment name as 'Default Public Segment' """""" - # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server - lib_id = custom_ilib_admin_operations - r = run_api.ideploy_deploy(lib_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + params = { + ""name"": ""test_ilibrary_add_bigger_start_ip"", + ""is_public"": True, + ""network_segments"": { + ""add"": [ + { + 'name': 'Default-Public-Segment', + ""enable_ipv4"": 
True, + ""bridge_ip"": ""192.168.98.0"", + ""network_subnet"": ""255.255.255.0"", + ""enable_dhcp"": True, + ""start_ip"": ""192.168.98.1"", + ""end_ip"": ""192.168.98.150"" + } + ] + } + } + params, r = run_api.ilibrary_add_new_island(params=params) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['network_segments']['add'][0]['name'] == [""NetworkSegment name cannot contain any whitespace nor any special characters other than '_' or '-'""] - # When the user is part of the group that the manager manages but the deployment is not on manager rightful server - lib_id = custom_ilib_non_admin_operations - r = run_api.ideploy_deploy(lib_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) " -/ideploy/rest/details/{UUID}/,Confirm Machines details with the deployment,,Created as was worked,"def test_ideploy_details_comfirm_machines_details(library_add_new_vm, run_api): +/ilibrary/rest/add/,creating an island library and adding it when required fields are not provided,"{ +}","{ +""status"" : 400, +""message"" : ""Required fields should be provided"" +}","def test_ilibrary_add_without_params(run_api): """""" - Confirm network segments that were deployed with the Island + Creating an Island Library without params """""" - params1, r1 = library_add_new_vm - island_name = rand_string() - mc_name = rand_string() - params = {""name"": island_name, - ""description"": f""This is description for {island_name}"", - ""is_public"": True, - ""machines"": {""add"": [{""uuid"": r1[""uuid""], ""name"": mc_name, ""description"": f""This is description for {mc_name}"", ""nics"": {""update"": []}, ""network_segments"": {""add"": []}}]}} + params = {} params, r = run_api.ilibrary_add_new_island(params=params) - island_uuid = r.json()[""uuid""] - res = run_api.ideploy_deploy(uuid=island_uuid) - deploy_uuid = res.json()[""deploy_uuid""] - r_details = run_api.ideploy_details(deploy_uuid) - result = r_details.json() - assert 
result['island']['name'] == f""{params['name']} #1"" - assert result[""island""]['description'] == params['description'] - assert result['machines'][0]['name'] == f""{params['machines']['add'][0]['name']} #1"" - assert result['machines'][0]['description'] == params['machines']['add'][0]['description'] - run_api.ideploy_delete(uuid=deploy_uuid) - run_api.ilibrary_delete(uuid=island_uuid, params={}) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['name'] == ['This field is required.'] + assert rjson['is_public'] == ['This field is required.'] " -/ideploy/rest/details/{UUID}/,Confirm network segments that were deployed with the Island,,Attached as was intended,"def test_ideploy_details_comfirm_network_segments(ideploy_deploy, run_api): +/ilibrary/rest/add/,creating an island library and adding it when requested with invalid token,"{ +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_ilibrary_add_with_invalid_token(invalid_exec_api): """""" - Confirm network segments that were deployed with the Island + Creating an Island Library with invalid token """""" - params, r = ideploy_deploy - network_segments = params['network_segments']['add'] - rjson = r.json() - deploy_id = rjson[""deploy_uuid""] - res = run_api.ideploy_details(deploy_id) - result = res.json() - r_network_segments = result['island']['network_segments'][2:] - - for i, j in zip(network_segments, r_network_segments): - assert i.get('name') == j.get('name') - assert i.get('description') == j.get('description') - assert i.get('enable_ipv4') == j.get('enable_ipv4') - - + params = {} + params, r = invalid_exec_api.ilibrary_add_new_island(params=params) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == 'Invalid token.' 
" -/ideploy/rest/details/{UUID}/,Confirm state transition from Deploying to Stopped,,Working as intended,"def test_ideploy_details_comfirm_state_from_deployimg_to_stop(ideploy_details): +/ilibrary/rest/add/,creating an island library and adding it when provided with NIC id of machine which is not part of the current machine,"{ + ""name"": ""test_ilibrary_add_machine_with_other_nic_id"", + ""is_public"": True, + ""machines"": { + ""add"": [ + { + 'name': ""machine"", + ""uuid"": r1.json()[""uuid""], + ""nics"": { + ""update"": [ + { + 'id': second_nic, + ""model"": ""virtio"" + } + ] + } + } + ] + } + }","{ +""status"" : 400, +""message"" : ""The provided nic with id isn't part of this machine"" +}","def test_ilibrary_add_machine_other_nic_id(run_api): """""" - Confirm state transition from Deploying to Stopped + Adding Machine with id of NIC which is not part of this machine but some other machine """""" - x, r = ideploy_details - rjson = r.json() - assert rjson['state'] == ""stopped"", 'The error is %s' % rjson['state'] + networks = template_networks() + if run_api.arch_type == ""aarch64"": + params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + params2, r2 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + else: + params1, r1 = run_api.library_add_new_vm(networks=networks) + params2, r2 = run_api.library_add_new_vm(networks=networks) + # first_nic = r1.json()['hw']['networks'][0]['id'] + second_nic = r2.json()['hw']['networks'][0]['id'] + params = { + ""name"": ""test_ilibrary_add_machine_with_other_nic_id"", + ""is_public"": True, + ""machines"": { + ""add"": [ + { + 'name': ""machine"", + ""uuid"": r1.json()[""uuid""], + ""nics"": { + ""update"": [ + { + 'id': second_nic, + ""model"": ""virtio"" + } + ] + } + } + ] + } + } + params, r = run_api.ilibrary_add_new_island(params=params) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == 
""The provided nic with id ["" + str(second_nic) + ""] isn't part of this machine"" + run_api.library_delete(r1.json()[""uuid""]) + run_api.library_delete(r2.json()[""uuid""]) " -/ideploy/rest/details/{UUID}/,fetching the deployment details of deployed island machine using invalid uuid ,"{ -deploy_id =""invalid"" -} - +/ilibrary/rest/add/,creating an island library and adding it when provided start_ip and/or end_ip value is out of range as that of bridge_ip/Subnet range,"{ + ""name"": ""test_ilibrary_add_ips_out_of_range"", + ""is_public"": True, + ""network_segments"": { + ""add"": [ + { + ""name"": ""test_segment"", + ""enable_ipv4"": True, + ""bridge_ip"": ""192.168.98.0"", + ""network_subnet"": ""255.255.255.0"", + ""enable_dhcp"": True, + ""start_ip"": ""191.168.10.1"", + ""end_ip"": ""191.168.10.150"" + } + ] + } + } ","{ - ""status"" : 404, - ""message"" : ""Deployment of Island with uuid:invalid doesn't exists"" -}","def test_ideploy_details_with_invalid_uuid(run_api): +""status"" : 400, +""message"" : ""start_ip and/or end_ip should lie between inclusive range "" +}","def test_ilibrary_add_ips_out_of_range(run_api): """""" - Details of Island uuid does not exists + Creating an Island Library with out of range start ip, end ip """""" - deploy_id = ""invalid"" - r = run_api.ideploy_details(deploy_id) - test_assert.status(r, 404) + params = { + ""name"": ""test_ilibrary_add_ips_out_of_range"", + ""is_public"": True, + ""network_segments"": { + ""add"": [ + { + ""name"": ""test_segment"", + ""enable_ipv4"": True, + ""bridge_ip"": ""192.168.98.0"", + ""network_subnet"": ""255.255.255.0"", + ""enable_dhcp"": True, + ""start_ip"": ""191.168.10.1"", + ""end_ip"": ""191.168.10.150"" + } + ] + } + } + params, r = run_api.ilibrary_add_new_island(params=params) + test_assert.status(r, 400) rjson = r.json() - assert rjson['error'] == ""Deployment of Island with uuid:invalid doesn't exists"", ""The error message is %s"" % rjson['error'] - + assert ""start_ip and/or 
end_ip should lie between inclusive range of"" in rjson['error'] " -/ideploy/rest/details/{UUID}/,fetching details the deployed island machine using valid uuid,,"{ -""status"":200 -}","def test_ideploy_details(ideploy_details): +/ilibrary/rest/add/,creating an island library and adding it when provided segment without name,"{ + ""name"": ""test_ilibrary_add_bigger_start_ip"", + ""is_public"": True, + ""network_segments"": { + ""add"": [ + { + ""enable_ipv4"": True, + ""bridge_ip"": ""192.168.98.0"", + ""network_subnet"": ""255.255.255.0"", + ""enable_dhcp"": True, + ""start_ip"": ""192.168.98.1"", + ""end_ip"": ""192.168.98.150"" + } + ] + } + }","{ +""status"" : 400, +""message"" : ""This field must not be blank"" +}","def test_ilibrary_add_without_segmennt_name(run_api): + """""" + Creating an Island Library without segment name + """""" + params = { + ""name"": ""test_ilibrary_add_bigger_start_ip"", + ""is_public"": True, + ""network_segments"": { + ""add"": [ + { + ""enable_ipv4"": True, + ""bridge_ip"": ""192.168.98.0"", + ""network_subnet"": ""255.255.255.0"", + ""enable_dhcp"": True, + ""start_ip"": ""192.168.98.1"", + ""end_ip"": ""192.168.98.150"" + } + ] + } + } + params, r = run_api.ilibrary_add_new_island(params=params) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['network_segments']['add'][0]['name'] == ['This field is required.'] +" +/ilibrary/rest/add/,creating an island library and adding it when provided machine with no name,"{ + ""name"": ""test_ilibrary_add_machine_from_other_island"", + ""machines"": { + ""add"": [ + { + 'name': """", + ""uuid"": rjson[""uuid""] + } + ] + }, + ""is_public"": True + }","{ +""status"" : 201, +""response"" : success , island library created +}"," +def test_ilibrary_add_machine_with_no_name(run_api, library_add_new_vm): """""" - Getting Island deploy details + Creating an Island Library of machine with no name """""" - x, r = ideploy_details - test_assert.status(r, 200) + params, rjson = 
library_add_new_vm + params = { + ""name"": ""test_ilibrary_add_machine_from_other_island"", + ""machines"": { + ""add"": [ + { + 'name': """", + ""uuid"": rjson[""uuid""] + } + ] + }, + ""is_public"": True + } + params1, r1 = run_api.ilibrary_add_new_island(params=params) + test_assert.status(r1, 201) + rjson1 = r1.json() + run_api.library_delete(rjson[""uuid""], params) + if 'error' not in rjson.keys(): + uuid = rjson1[""uuid""] + run_api.ilibrary_delete(uuid, params1) " -/ideploy/rest/details/{UUID}/,fetching the details of deployed island machine when requested with invalid token,"{ -deploy_id =""invalid"" +/ilibrary/rest/add/,creating an island library and adding it when provided Island with no name,"{ + ""name"": """", + ""is_public"": True }","{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_ideploy_details_with_invalid_token(invalid_exec_api): +""status"" : 400, +""message"" : ""This field must not be blank"" +}","def test_ilibrary_add_empty_island_name(run_api): """""" - Invalid Token + Creating an Island Library with empty island name """""" - deploy_id = ""invalid"" - r = invalid_exec_api.ideploy_details(deploy_id) - test_assert.status(r, 401) + params = { + ""name"": """", + ""is_public"": True + } + params, r = run_api.ilibrary_add_new_island(params=params) + test_assert.status(r, 400) rjson = r.json() - assert rjson['detail'] == ""Invalid token."", ""The error message is %s"" % rjson['detail'] - - + assert rjson['name'] == ['This field may not be blank.'] " -/ideploy/rest/details/{UUID}/,fetching the details of deployed island machine without authorization,"{ -deploy_id =""invalid"" +/ilibrary/rest/add/,creating an island library and adding it when only the required params,"{ + ""name"": ""test_ilibrary_add_required_params"", + ""is_public"": True }","{ - ""status"" : 401, - ""message"" : ""Authentication credentials were not provided."" -}","def test_ideploy_details_without_authorization(anonymous_exec_api): +""status"" : 201, 
+""response"" : success , island library created +}","def test_ilibrary_add_required_params(run_api): """""" - without authorization + Creating an Island Library with required params """""" - deploy_id = ""invalid"" - r = anonymous_exec_api.ideploy_details(deploy_id) - test_assert.status(r, 401) + params = { + ""name"": ""test_ilibrary_add_required_params"", + ""is_public"": True + } + params, r = run_api.ilibrary_add_new_island(params=params) + test_assert.status(r, 201) rjson = r.json() - assert rjson['detail'] == ""Authentication credentials were not provided."", ""The error message is %s"" % rjson['detail'] - + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) " -/ideploy/rest/edit/{UUID}/,Add duplicate MACs inside NIC of different Machines and set allow_duplicate_network as false ,,400: Bad Request," -def test_ideploy_edit_update_duplicate_mac_in_different_machines(run_api): - """""" - Editing the Island deploy details by Add duplicate MACs inside NIC of different Machines and set allow_duplicate_network as false - """""" - networks = template_networks() - if run_api.arch_type == ""aarch64"": - params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") - params2, r2 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") - else: - params1, r1 = run_api.library_add_new_vm(networks=networks) - params2, r2 = run_api.library_add_new_vm(networks=networks) - - params3 = { - ""name"": ""test_ideploy"", +/ilibrary/rest/add/,creating an island library and adding it when machine which is a part of another Island is provided,"{ + ""name"": ""test_ilibrary_add_machine_from_other_island"", ""machines"": { ""add"": [ { - ""uuid"": r1.json()[""uuid""] - }, - { - ""uuid"": r2.json()[""uuid""] + ""uuid"": uuid } ] }, - ""is_public"": False - } - params3, r3 = run_api.ilibrary_add_new_island(params=params3) - rjson3 = r3.json() - uuid = 
rjson3['uuid'] - r = run_api.ideploy_deploy(uuid) - deploy_id = r.json()['deploy_uuid'] - r = run_api.ideploy_details(deploy_id) - rjson = r.json() - machine1_uuid = rjson['machines'][0]['uuid'] - machine2_uuid = rjson['machines'][0]['uuid'] - island_uuid = rjson['island']['uuid'] - params3 = { - ""updated_machines"": [ - { - ""uuid"": machine1_uuid, - 'nics': { - 'add': [ - { - ""model"": ""virtio"", - 'mac': '56:54:00:0C:8A:4A' - } - ] - } - }, - { - ""uuid"": machine2_uuid, - 'nics': { - 'add': [ - { - ""model"": ""virtio"", - 'mac': '56:54:00:0C:8A:4A' - } - ] - } - } - ], - 'allow_duplicate_network': False - } - r = run_api.ideploy_edit(deploy_id, params=params3) - test_assert.status(r, 400) - res = r.json() - assert res['error'] == ""Island ["" + island_uuid + ""] getting duplicate mac_addresses. Use `allow_duplicate_network` to force continue..."" - run_api.ideploy_delete(deploy_id) - if 'error' not in rjson3.keys(): - run_api.ilibrary_delete(uuid, params3) - run_api.library_delete(r1.json()[""uuid""]) - run_api.library_delete(r2.json()[""uuid""]) - -" -/ideploy/rest/edit/{UUID}/,Add duplicate MACs inside NIC of same Machine and set allow_duplicate_network as false,,400: Bad Request," -def test_ideploy_edit_add_duplicate_mac_in_same_machine(run_api): + ""is_public"": True + }","{ +""status"" : 400, +""message"" : ""Adding machine which already is a part of an Island isn't supported..."" +}","def test_ilibrary_add_machine_from_other_island(run_api, ilibrary_add_new_island): """""" - Editing the Island deploy details by Add duplicate MACs inside NIC of same Machine and set allow_duplicate_network as false + Creating an Island Library by adding machine from another island """""" - networks = template_networks() - if run_api.arch_type == ""aarch64"": - params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") - else: - params1, r1 = run_api.library_add_new_vm(networks=networks) - - params3 = { - ""name"": 
""test_ideploy"", + params, r = ilibrary_add_new_island + rjson = r.json() + machines = rjson['machines'] + uuid = machines[0]['uuid'] + params = { + ""name"": ""test_ilibrary_add_machine_from_other_island"", ""machines"": { ""add"": [ { - ""uuid"": r1.json()[""uuid""] + ""uuid"": uuid } ] }, - ""is_public"": False - } - params3, r3 = run_api.ilibrary_add_new_island(params=params3) - rjson3 = r3.json() - uuid = rjson3['uuid'] - - r = run_api.ideploy_deploy(uuid) - deploy_id = r.json()['deploy_uuid'] - r = run_api.ideploy_details(deploy_id) - rjson = r.json() - machine_uuid = rjson['machines'][0]['uuid'] - machine_mac = rjson['machines'][0]['machine']['hw']['networks'][0]['mac'] - island_uuid = rjson['island']['uuid'] - - params3 = { - ""updated_machines"": [ - { - ""uuid"": machine_uuid, - 'nics': { - 'add': [ - { - ""model"": ""virtio"", - 'mac': machine_mac - } - ] - } - } - ], - 'allow_duplicate_network': False + ""is_public"": True } - r = run_api.ideploy_edit(deploy_id, params=params3) - test_assert.status(r, 400) - res = r.json() - assert res['error'] == ""Island ["" + island_uuid + ""] getting duplicate mac_addresses. Use `allow_duplicate_network` to force continue..."" - run_api.ideploy_delete(deploy_id) - if 'error' not in rjson3.keys(): - run_api.ilibrary_delete(uuid, params3) - run_api.library_delete(r1.json()[""uuid""]) -" -/ideploy/rest/edit/{UUID}/,Edit a Deployment which you are not an owner of and without Admin rights,"{ -""name"": ""modified_colama"", -""description"": ""testing for edit"", -""allow_duplicate_network"": False -}",403: Forbidden," -@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) -def test_ideploy_edit_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): - """""" - Changing of Island Deployed Image by non-Admin - """""" - # Non-admin check for changing details of a Deployed Image created by different user. 
- ideploy_id = custom_ilib_admin_operations - edit_param = {""name"": ""modified_colama"", ""description"": ""testing for edit"", 'allow_duplicate_network': False} - r = run_api.ideploy_edit(ideploy_id, params=edit_param) - test_assert.status(r, 403) -" -/ideploy/rest/edit/{UUID}/,Edit a Deployment which you are not an owner of but with Admin rights,"{ -""name"": ""modified_colama"", -""description"": ""testing for edit"", -""allow_duplicate_network"": False -}",202: Deployed Island Details,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) -def test_ideploy_edit_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): - """""" - Changing of Island Deployed Image by Admin - """""" - # Admin check for changing details of a Deployed Image created by different user. - ideploy_id = custom_ilib_non_admin_operations - edit_param = {""name"": ""modified_colama"", ""description"": ""testing for edit"", 'allow_duplicate_network': False} - r = run_api.ideploy_edit(ideploy_id, params=edit_param) - test_assert.status(r, 202) + params, r = run_api.ilibrary_add_new_island(params=params) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""Adding machine which already is a part of an Island isn't supported..."" " -/ideploy/rest/edit/{UUID}/,Editing a Deployment with no parameters,,"400: Bad Request: ""allow_duplicate_network"" field is required","def test_ideploy_edit_no_parameters(run_api, ideploy_deploy): +/ilibrary/rest/add/,creating an island library and adding it when invalid UUID of machine is provided,"{ + ""name"": ""test_ilibrary_add_invalid_uuid_machine"", + ""machines"": { + ""add"": [ + { + ""uuid"": ""invalid-uuid"" + } + ] + }, + ""is_public"": True + }","{ +""status"" : 400, +""message"" : ""Valid UUID must be provided"" +}","def test_ilibrary_add_invalid_uuid_machine(run_api): """""" - Editing the Island deploy details with No Parameters + Creating an Island Library with invalid uuid 
"""""" - param, result = ideploy_deploy - rjson = result.json() - deploy_id = rjson[""deploy_uuid""] - edit_param = {} - r = run_api.ideploy_edit(deploy_id, params=edit_param) + params = { + ""name"": ""test_ilibrary_add_invalid_uuid_machine"", + ""machines"": { + ""add"": [ + { + ""uuid"": ""invalid-uuid"" + } + ] + }, + ""is_public"": True + } + params, r = run_api.ilibrary_add_new_island(params=params) test_assert.status(r, 400) rjson = r.json() - assert rjson['allow_duplicate_network'] == ['This field is required.'] - - + assert rjson['machines']['add'][0]['uuid'] == ['Must be a valid UUID.'] " -/ideploy/rest/edit/{UUID}/,editing the deployment of island machine when requested with invalid token,"deploy_id = ""invalid-deploy_uuid"" -edit_param = { -""name"": ""modified_colama"", -""description"": ""testing for edit"", - ""allow_duplicate_network"": False -}","{ - ""status"" : 404, -}","def test_ideploy_edit_with_invalid_uuid(run_api): +/ilibrary/rest/add/,creating an island library and adding it,,"{ +""status"" : 201, +""response"" : success +}","def test_ilibrary_add(run_api, ilibrary_add_new_island): """""" - Editing the Island deploy details by invalid uuid + Creating an Island Library """""" - deploy_id = ""invalid-deploy_uuid"" - edit_param = {""name"": ""modified_colama"", - ""description"": ""testing for edit"", - 'allow_duplicate_network': False - } - r = run_api.ideploy_edit(deploy_id, params=edit_param) - test_assert.status(r, 404) + params, r = ilibrary_add_new_island + test_assert.status(r, 201) " -/ideploy/rest/edit/{UUID}/,resuming the deployed island machine when requested with invalid token,"deploy_id = ""invalid-deploy_uuid"" -edit_param = { -""name"": ""modified_colama"", -""description"": ""testing for edit"", -""allow_duplicate_network"": False -}","{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_ideploy_edit_with_invalid_token(invalid_exec_api): +/ilibrary/rest/bulk_delete/,sucessful deletion of island library,,"{ 
+""status"" : 204 +}","def test_ilibrary_bulk_delete(ilibrary_bulk_delete): """""" - Editing the Island deploy details with invalid token + Deleting multiple Island Library """""" - deploy_id = ""invalid-deploy_uuid"" - edit_param = {""name"": ""modified_colama"", - ""description"": ""testing for edit"", - 'allow_duplicate_network': False - } - r = invalid_exec_api.ideploy_edit(deploy_id, params=edit_param) - res = r.json() - test_assert.status(r, 401) - assert res['detail'] == ""Invalid token."" - - + params, r = ilibrary_bulk_delete + test_assert.status(r, 204) " -/ideploy/rest/edit/{UUID}/,editting the deployed island machine without authorization,"{ -""name"": ""modified_colama"", -""description"": ""testing for edit"", - 'allow_duplicate_network': False +/ilibrary/rest/bulk_delete/,"deleting the island library using island_list, where the list is set to null","{ +""island_list"" :None }","{ - ""status"" : 401, - ""message"" : ""Authentication credentials were not provided."" -}","def test_ideploy_edit_without_authorization(anonymous_exec_api): - """""" - Editing the Island deploy details without authorization - """""" - deploy_id = ""invalid-deploy_uuid"" - edit_param = {""name"": ""modified_colama"", - ""description"": ""testing for edit"", - 'allow_duplicate_network': False - } - r = anonymous_exec_api.ideploy_edit(deploy_id, params=edit_param) - res = r.json() - test_assert.status(r, 401) - assert res['detail'] == ""Authentication credentials were not provided."" - -" -/ideploy/rest/list/,fetching the filtered list of deployed islands. 
Check the user type before performing the operation.,,"{ - ""status"": 200, - ""response"": filtered list of deployed islands -}","def test_ideploy_list_filter(run_api): +""status"":400, +""message"" : ""island_list cannot be null or empty"" +}","def test_ilibrary_bulk_delete_null_island_list(run_api): """""" - Fetching the list of deployed islands by adding filters + Deleting ilibrary with empty and null island_list """""" - params, res, isl_res = [], [], [] - ideploy_count = 10 - arch = run_api.arch_type - prefix_name = f""filter_island_2_{rand_string()}_"" - isl_lib_name = [f""{prefix_name}{rand_string()}"" for _ in range(ideploy_count)] - networks = template_networks() - if arch == ""aarch64"": - params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") - params2, r2 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") - params3, r3 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") - else: - params1, r1 = run_api.library_add_new_vm(networks=networks) - params2, r2 = run_api.library_add_new_vm(networks=networks) - params3, r3 = run_api.library_add_new_vm(networks=networks) - machine1 = { - ""uuid"": r1.json()[""uuid""], - ""nic_update_id"": r1.json()[""hw""][""networks""][0][""id""], - ""nic_delete_id"": r1.json()[""hw""][""networks""][2][""id""] - } - machine2 = { - ""uuid"": r2.json()[""uuid""], - ""nic_update_id"": r2.json()[""hw""][""networks""][1][""id""], - ""nic_delete_id"": r2.json()[""hw""][""networks""][0][""id""] - } - machine3 = { - ""uuid"": r3.json()[""uuid""], - ""nic_update_id"": r3.json()[""hw""][""networks""][2][""id""], - ""nic_delete_id"": r3.json()[""hw""][""networks""][1][""id""] - } - for i in range(ideploy_count): - param, r = run_api.ilibrary_add_new_island(machine1=machine1, machine2=machine2, - machine3=machine3, name=isl_lib_name[i]) - isl_uuid = r.json()[""uuid""] - params.append(param) - 
res.append(r) - isl_r = run_api.ideploy_deploy(isl_uuid) - isl_res.append(isl_r) - random_int = randint(0, 9) - name_filter = {""name"": params[random_int].get(""name"") + "" #1"", ""page_size"": ideploy_count} - uuid_filter = {""uuid"": isl_res[random_int].json().get(""deploy_uuid""), ""page_size"": ideploy_count} - owner_filter = {""owner"": ""colama"" if run_api.user_type == ""admin"" - else ""vivekt"" if run_api.user_type == ""non-admin"" - else ""manager"", ""search"": prefix_name, ""page_size"": ideploy_count} - exp_res = { - 0: [i.get(""name"") for i in params if i.get(""name"") + "" #1"" == name_filter.get(""name"")], - 1: [i.json().get(""deploy_uuid"") for i in isl_res if i.json().get(""deploy_uuid"") == uuid_filter.get(""uuid"")], - 2: [i.json().get(""owner"") for i in res], + islands = { + ""island_list"": None } - filters = [name_filter, uuid_filter, owner_filter] - for filter in range(len(filters)): - r = run_api.ideploy_list(filters[filter]) - # check for valid response data with the filter parameters - if len(r.json().get(""results"")) != len(exp_res[filter]): - logging.error(f""error in filter: {filters[filter]}, the list of expected result for the filter is: {exp_res[filter]}, and the actual result is {r.json()}"") - assert False - test_assert.status(r, 200) - run_api.library_delete(r1.json()[""uuid""], params1) - run_api.library_delete(r2.json()[""uuid""], params2) - run_api.library_delete(r3.json()[""uuid""], params3) - for i in range(ideploy_count): - isl_rjson = isl_res[i].json() - if 'error' not in isl_rjson.keys(): - uuid = isl_rjson[""deploy_uuid""] - run_api.ideploy_delete(uuid) - ilib_rjson = res[i].json() - if 'error' not in ilib_rjson.keys(): - uuid = ilib_rjson[""uuid""] - run_api.ilibrary_delete(uuid, params[i]) + r = run_api.ilibrary_bulk_delete(islands) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""island_list cannot be null or empty"", ""|> Json %s"" % rjson " -/ideploy/rest/list/,fetching the list of 
deployed islands when requested with invalid token,,"{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_ideploy_list_invalid_token(invalid_exec_api): +/ilibrary/rest/bulk_delete/,"deleting the island library using island_list, where the list is set to empty string","{ +""island_list"" : """" +}","{ +""status"":400, +""message"" : ""island_list cannot be null or empty"" +}","def test_ilibrary_bulk_delete_empty_island_list(run_api): """""" - Fetching the list of deployed islands by invalid token + Deleting ilibrary with empty and null island_list """""" - idepl_list = invalid_exec_api.ideploy_list() - idepl_json = idepl_list.json() - test_assert.status(idepl_list, 401) - assert idepl_json[""detail""] == ""Invalid token."" - -" -/ideploy/rest/list/,fetching the list of deployed islands by providing tag value,"{ -""scope"": ""all"" + islands = { + ""island_list"": """" + } + r = run_api.ilibrary_bulk_delete(islands) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""island_list cannot be null or empty"", ""|> Json %s"" % rjson" +/ilibrary/rest/bulk_delete/,"deleting the island library using island_list, where the list is set to empty list","{ +""island_list"" :[] }","{ - ""status"": 200, - ""response"": list of deployed islands -}","def test_ideploy_list_by_scope(run_api): - """""" - filter by scope +""status"":400, +""message"" : ""island_list cannot be null or empty"" +}","def test_ilibrary_bulk_delete_empty_list_island_list(run_api): """""" - params = {""scope"": ""all""} - r = run_api.ideploy_list(params) - test_assert.status(r, 200) -" -/ideploy/rest/list/,fetching the list of deployed islands by user who does not own the deployed image,"{ - uuid = 'valid-deployment-uuid' + Deleting ilibrary with empty and null island_list + """""" + islands = { + ""island_list"": [] + } + r = run_api.ilibrary_bulk_delete(islands) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""island_list cannot be null or 
empty"", ""|> Json %s"" % rjson" +/ilibrary/rest/bulk_delete/,deleting the island library using invalid data type of island_list,"{ +""island_list"": ""string"" }","{ - ""status"": 200, - ""response"": list of deployed islands -}","def test_ideploy_list_island_not_owner(skip_if_non_admin, non_admin_exec_api, ideploy_deploy): +""status"":400, +""message"" : ""Please provide the list of uuids not strings"" +}","def test_ilibrary_bulk_delete_invalid_data_type(run_api): """""" - Fetching the list of deployed islands of other user + Deleting ilibrary with invalid data type island_list """""" - template, r = ideploy_deploy - idepl_list = non_admin_exec_api.ideploy_list() - user = non_admin_exec_api.user - owners = [result['island']['owner'] for result in idepl_list.json()['results'] if result['island']['owner'] != user] - test_assert.status(idepl_list, 200) - assert len(owners) == 0 + islands = { + ""island_list"": ""string"" + } + r = run_api.ilibrary_bulk_delete(islands) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""Please provide the list of uuids not strings"", ""|> Json %s"" % rjson " -/ideploy/rest/list/,fetching the list of deployed islands when no Token Provided,"{ - uuid = 'valid-existing-island-library-uuid' +/ilibrary/rest/clone/{UUID}/,creating a clone of an Island Library without Authorization,"{ + uuid = 'valid-ilibrary-uuid' }","{ ""status"" : 401, ""message"" : ""Authentication credentials were not provided."" -}","def test_ideploy_list_without_authorization(anonymous_exec_api): +}","def test_ilibrary_clone_without_authorization(anonymous_exec_api): """""" - Fetching the list of deployed islands without authorization + Creating a clone of an Island Library without authorization """""" - idepl_list = anonymous_exec_api.ideploy_list() - idepl_json = idepl_list.json() - test_assert.status(idepl_list, 401) - assert idepl_json[""detail""] == ""Authentication credentials were not provided."" - + uuid = 'valid-ilibrary-uuid' + 
params, r = anonymous_exec_api.ilibrary_clone_island(uuid) + test_assert.status(r, 401) + res = r.json() + assert res['detail'] == ""Authentication credentials were not provided."" " -/ideploy/rest/list/,"fetching the list of deployed islands when provided with tag name in [""_sessionid"", ""_session_name"", ""_session_created_on""]","{ -tag_name = ""valid-name"" -}","{ +/ilibrary/rest/clone/{UUID}/,creating a clone of an Island Library with some name and empty description,"{ + ""name"": ""test_clone"", + ""description"": """" + }","{ ""status"": 200, - ""response"": list of deployed islands -}","def test_ideploy_list_by_tag_name(run_api, ideploy_details): + ""response"": island library cloned +}","def test_ilibrary_clone_with_name_empty_description(run_api, ilibrary_add_new_island): """""" - when provided with tag name + Creating a clone of an Island Library with name and empty description """""" - x, r = ideploy_details - detail = r.json() - tag_value = detail['tags'][0]['name'] - params = {""tags"": tag_value} - r = run_api.ideploy_list(params) + params1, r1 = ilibrary_add_new_island + uuid = r1.json()[""uuid""] + clone = { + ""name"": ""test_clone"", + ""description"": """" + } + params, r = run_api.ilibrary_clone_island(uuid, params=clone) test_assert.status(r, 200) rjson = r.json() - for island in rjson['results']: - result = run_api.ideploy_details(island['uuid']).json() - assert tag_value == result['tags'][0]['name'], ""|> Json %s"" % result + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) + " -/ideploy/rest/list/,fetching the list of deployed islands when provided with tag value,"{ -""tags"": tag_value +/ilibrary/rest/clone/{UUID}/,creating a clone of an Island Library when requested with invalid token,"{ + uuid = 'invalid-ilibrary-uuid' }","{ - ""status"": 200, - ""response"": list of deployed islands -}","def test_ideploy_list_by_tag_value(run_api, ideploy_details): +""status"" : 401, +""message"" : 
""Invalid token"" +}","def test_ilibrary_clone_with_invalid_token(invalid_exec_api): """""" - when provided with tag value + Creating a clone of an Island Library with invalid token """""" - x, r = ideploy_details - detail = r.json() - tag_value = detail['tags'][0]['value'] - params = {""tags"": tag_value} - r = run_api.ideploy_list(params) - test_assert.status(r, 200) - rjson = r.json() - for island in rjson['results']: - result = run_api.ideploy_details(island['uuid']).json() - assert tag_value == result['tags'][0]['value'], ""|> Json %s"" % result + uuid = 'invalid-ilibrary-uuid' + params, r = invalid_exec_api.ilibrary_clone_island(uuid) + test_assert.status(r, 401) + res = r.json() + assert res['detail'] == ""Invalid token."" " -/ideploy/rest/list/,fetching the list of deployed islands which is filtered on created and update DateTime Filter,"{ - uuid = 'valid-deployment-uuid' -}",,"def test_ideploy_filter_timefilter(run_api: apiops, ilibrary_add_new_island): +/ilibrary/rest/clone/{UUID}/,creating a clone of an Island Library when Island UUID does not exist,"{ + uuid = 'invalid-ilibrary-uuid' +}","{ + ""status"": 404, + ""response"": not found +}","def test_ilibrary_clone_invalid_uuid(run_api): """""" - Filter on created and update DateTime Filter + Creating a clone of an Island Library with invalid uuid """""" - template, r = ilibrary_add_new_island - rjson = r.json() - ilib_id = rjson[""uuid""] - r = run_api.ideploy_deploy(ilib_id) - ideploy_id = r.json()[""deploy_uuid""] - r_details = run_api.ideploy_details(ideploy_id).json() - # utime and ctime is returned in the form '2023-09-14T17:59:39.173594Z' which needs conversion to '2023-09-14 17:59:39.173594' - str_ctime = r_details['ctime'].replace('T', ' ').replace('Z', '') - datetime_ctime = convert_datetime_stringform(r_details['ctime']) - - def handle_trigger_delay_filtering_for_created_on(created_start_date, created_end_date, created_date_range, utc=True): - """""" - Function to handle corner case if machine 
was created a day before and test get triggered on new day - """""" - if not utc: - created_start_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' - created_end_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' - assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": created_start_date, ""created_end_date"": created_end_date, - ""created_date_range"": created_date_range, ""page_size"": 1}).json()['count'] == 1 - # Filter on UTC time - # .... When the datetime is selected to be the same as in detail - assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": str_ctime, - ""created_end_date"": str_ctime, ""page_size"": 1}).json()['count'] == 1 - # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased - # ........ When the datetime is selected a 1 microsecond more than from the detail - assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), - ""page_size"": 1}).json()['count'] == 0 - # ........ When the datetime is selected a 1 microsecond less than from the detail - assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), - ""page_size"": 1}).json()['count'] == 1 - # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased - # ........ When the datetime is selected a 1 microsecond more than from the detail - assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), - ""page_size"": 1}).json()['count'] == 1 - # ........ 
When the datetime is selected a 1 microsecond less than from the detail - assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), - ""page_size"": 1}).json()['count'] == 0 - # ........Filter on 'created_start_date' and 'created_end_date' when passed blank string - assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": """", ""created_end_date"": """", ""page_size"": 1}).json()['count'] == 1 - # ........Filter on 'created_start_date' and 'created_end_date' when created_start_date is greater than created_end_date - assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), - ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), ""page_size"": 1}).json()['count'] == 0 - # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. 
- # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'today - try: - assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine is created yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'yesterday - try: - assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 - except AssertionError: - # when machine is created yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'week - try: - assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine is created on week's last day at 23:59:59.9999999 and test get triggered on new week at 00:00:00.0000000 - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'month - try: - assert 
run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine is created on month's last day at 23:59:59.9999999 and test get triggered on new month at 00:00:00.0000000 - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'year' - try: - assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine is created on year last day at 23:59:59.9999999 and test get triggered on new year at 00:00:00.0000000 - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") - # .........When the created_date_range format is invalid - response = run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""invalid"", ""page_size"": 1}) - test_assert.status(response, 400) - assert response.json()['created_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() - # .........When the created_start_date and created_end_date has whitespaces in them - assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": "" "" + str_ctime + "" "", ""created_end_date"": "" "" + str_ctime + "" "", ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 - - # Filter on IST time - # .... When the datetime is selected to be the same as in detail but having TimeZone of +0530 - # ........ 
When the datetime is selected to be the same as in detail but with tzone IST, i.e., timedelta being 19800 equivalent to +05:30 - assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""page_size"": 1}).json()['count'] == 1 - # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 - # ........ When the datetime is selected to be the same as in detail but with tzone IST, i.e., timedelta being 19800 equivalent to +05:30 - assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', - ""page_size"": 1}).json()['count'] == 0 - # ........ When the datetime is selected to be the same as in detail but with tzone IST, i.e., timedelta being 19800 equivalent to +05:30 but increase microsecond by 1 to fail filter - assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', - ""page_size"": 1}).json()['count'] == 1 - # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 - # ........ When the datetime is selected a 1 microsecond more than from the detail - assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', - ""page_size"": 1}).json()['count'] == 1 - # ........ 
When the datetime is selected a 1 microsecond less than from the detail - assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', - ""page_size"": 1}).json()['count'] == 0 - # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'today - try: - assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine is created yesterday at 23:59:59.9999999 IST and test get triggered at 00:00:00.0000000 IST - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'yesterday - try: - assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 - except AssertionError: - # when machine is created yesterday at 23:59:59.9999999 IST and test get triggered at 00:00:00.0000000 IST - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) - # ........When the 'created_start_date' and 'created_end_date' are same 
as in the detail and 'created_date_range' is passed as'week - try: - assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine is created on week's last day at 23:59:59.9999999 IST and test get triggered on new week at 00:00:00.0000000 IST - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'month - try: - assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine is created on month's last day at 23:59:59.9999999 IST and test get triggered on new month at 00:00:00.0000000 IST - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'year - try: - assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 - except 
AssertionError: - # when machine is created on year last day at 23:59:59.9999999 IST and test get triggered on new year at 00:00:00.0000000 IST - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) - # .........When the created_date_range format is invalid - response = run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""invalid"", ""page_size"": 1}) - test_assert.status(response, 400) - assert response.json()['created_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() - # .........When the created_start_date and created_end_date has whitespaces in them - assert run_api.ideploy_list({""uuid"": ideploy_id, ""created_start_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", ""created_end_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", - ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 - run_api.ideploy_delete(ideploy_id) + uuid = 'invalid-ilibrary-uuid' + params, r = run_api.ilibrary_clone_island(uuid) + test_assert.status(r, 404) +" +/ilibrary/rest/clone/{UUID}/,creating a clone of an Island Library by non-admin user,"{ + ""name"": ""test_clone"", + ""description"": ""cloning private island without admin rights"" + }","{ + ""status"": 403 +}","PARAMETERS = [{""dest_obj"": OBJ_ISL}] + +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_ilibrary_clone_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): + """""" + Creating a clone of an private Island Library without 
admin rights whose owner is not current user + """""" + ilib_id = custom_ilib_admin_operations + clone = { + ""name"": ""test_clone"", + ""description"": ""cloning private island without admin rights"" + } + params, r = run_api.ilibrary_clone_island(ilib_id, params=clone) + test_assert.status(r, 403) " -/ideploy/rest/list/,successfully fetching the list of deployed islands,,"{ -""status"":200, -""response"": list of deployed islands -}","def test_ideploy_list(ideploy_list): +/ilibrary/rest/clone/{UUID}/,creating a clone of an Island Library,,"{ + ""status"": 200, + ""response"": island library cloned +}","def test_ilibrary_clone(run_api, ilibrary_clone_island): """""" - Fetching the list of deployed islands + Creating a clone of an Island Library """""" - template, r = ideploy_list + params, r = ilibrary_clone_island + result = r.json() + test_assert.status(params, result, ""ilibrary_clone"") test_assert.status(r, 200) " -/ideploy/rest/list/,"successfully fetching the list of deployed islands by adding filters. Check the user type before performing the operation. 
-",,"{ -""status"":200, -""response"": list of deployed islands -}","def test_ideploy_list_filter(run_api): +/ilibrary/rest/clone/{UUID}/,creating a clone of an ilibrary without name and description,"{ +}","{ + ""status"": 400, + ""response"": field required +}","def test_ilibrary_clone_without_name_and_description(run_api, ilibrary_add_new_island): """""" - Fetching the list of deployed islands by adding filters + Creating a clone of an Island Library without name and without description """""" - params, res, isl_res = [], [], [] - ideploy_count = 10 - arch = run_api.arch_type - prefix_name = f""filter_island_2_{rand_string()}_"" - isl_lib_name = [f""{prefix_name}{rand_string()}"" for _ in range(ideploy_count)] - networks = template_networks() - if arch == ""aarch64"": - params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") - params2, r2 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") - params3, r3 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") - else: - params1, r1 = run_api.library_add_new_vm(networks=networks) - params2, r2 = run_api.library_add_new_vm(networks=networks) - params3, r3 = run_api.library_add_new_vm(networks=networks) - machine1 = { - ""uuid"": r1.json()[""uuid""], - ""nic_update_id"": r1.json()[""hw""][""networks""][0][""id""], - ""nic_delete_id"": r1.json()[""hw""][""networks""][2][""id""] - } - machine2 = { - ""uuid"": r2.json()[""uuid""], - ""nic_update_id"": r2.json()[""hw""][""networks""][1][""id""], - ""nic_delete_id"": r2.json()[""hw""][""networks""][0][""id""] + params1, r1 = ilibrary_add_new_island + uuid = r1.json()[""uuid""] + params, r = run_api.ilibrary_clone_island(uuid, params={}) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['name'] == ['This field is required.'] +" +/ilibrary/rest/clone/{UUID}/,creating a clone of an ilibrary with name but no description,"{ + 
""name"": ""test_clone"" +}","{ + ""status"": 200, + ""response"": island library cloned +}","def test_ilibrary_clone_with_name_only(run_api, ilibrary_add_new_island): + """""" + Creating a clone of an Island Library with name only + """""" + params1, r1 = ilibrary_add_new_island + uuid = r1.json()[""uuid""] + clone = { + ""name"": ""test_clone"" } - machine3 = { - ""uuid"": r3.json()[""uuid""], - ""nic_update_id"": r3.json()[""hw""][""networks""][2][""id""], - ""nic_delete_id"": r3.json()[""hw""][""networks""][1][""id""] + params, r = run_api.ilibrary_clone_island(uuid, params=clone) + test_assert.status(r, 200) + rjson = r.json() + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) + +" +/ilibrary/rest/clone/{UUID}/,clone a Public Island and check is_public flag on cloned island is False,"{ + ""name"": ""test_ilibrary_clone_public_island"", + ""is_public"": True + }","{ + ""status"": 200, + ""response"": island library cloned +}","def test_ilibrary_clone_public_island(run_api): + """""" + Creating a clone of an public Island Library and checking is_public flag on cloned island is False + """""" + params = { + ""name"": ""test_ilibrary_clone_public_island"", + ""is_public"": True } - for i in range(ideploy_count): - param, r = run_api.ilibrary_add_new_island(machine1=machine1, machine2=machine2, - machine3=machine3, name=isl_lib_name[i]) - isl_uuid = r.json()[""uuid""] - params.append(param) - res.append(r) - isl_r = run_api.ideploy_deploy(isl_uuid) - isl_res.append(isl_r) - random_int = randint(0, 9) - name_filter = {""name"": params[random_int].get(""name"") + "" #1"", ""page_size"": ideploy_count} - uuid_filter = {""uuid"": isl_res[random_int].json().get(""deploy_uuid""), ""page_size"": ideploy_count} - owner_filter = {""owner"": ""colama"" if run_api.user_type == ""admin"" - else ""vivekt"" if run_api.user_type == ""non-admin"" - else ""manager"", ""search"": prefix_name, ""page_size"": ideploy_count} - exp_res = { 
- 0: [i.get(""name"") for i in params if i.get(""name"") + "" #1"" == name_filter.get(""name"")], - 1: [i.json().get(""deploy_uuid"") for i in isl_res if i.json().get(""deploy_uuid"") == uuid_filter.get(""uuid"")], - 2: [i.json().get(""owner"") for i in res], + params, r = run_api.ilibrary_add_new_island(params=params) + rjson = r.json() + uuid = rjson[""uuid""] + clone = { + ""name"": ""test_clone"", + ""description"": ""cloning private island by admin whose owner is not admin"" } - filters = [name_filter, uuid_filter, owner_filter] - for filter in range(len(filters)): - r = run_api.ideploy_list(filters[filter]) - # check for valid response data with the filter parameters - if len(r.json().get(""results"")) != len(exp_res[filter]): - logging.error(f""error in filter: {filters[filter]}, the list of expected result for the filter is: {exp_res[filter]}, and the actual result is {r.json()}"") - assert False - test_assert.status(r, 200) - run_api.library_delete(r1.json()[""uuid""], params1) - run_api.library_delete(r2.json()[""uuid""], params2) - run_api.library_delete(r3.json()[""uuid""], params3) - for i in range(ideploy_count): - isl_rjson = isl_res[i].json() - if 'error' not in isl_rjson.keys(): - uuid = isl_rjson[""deploy_uuid""] - run_api.ideploy_delete(uuid) - ilib_rjson = res[i].json() - if 'error' not in ilib_rjson.keys(): - uuid = ilib_rjson[""uuid""] - run_api.ilibrary_delete(uuid, params[i]) + params, r = run_api.ilibrary_clone_island(uuid, params=clone) + rjson1 = r.json() + test_assert.status(r, 200) + assert rjson1['is_public'] is False + if 'error' not in rjson1.keys(): + uuid = rjson1[""uuid""] + run_api.ilibrary_delete(uuid, params) + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) " -/ideploy/rest/list/,successfully fetching the list of deployed islands using invalid uuid,,"{ -""status"":200, -""response"": list of deployed islands -}","def test_ideploy_list_invalid_uuid(run_api): 
+/ilibrary/rest/clone/{UUID}/,Clone a Private Island which you are not an owner of but with Admin Rights,"{ + ""name"": ""test_clone"", + ""description"": ""cloning private island by admin whose owner is not admin"" +}","{ + ""status"": 200, + ""response"": island library cloned +}","PARAMETERS = [{""dest_obj"": OBJ_ISL}] + + +@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +def test_ilibrary_clone_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): """""" - Fetching the list of deployed islands using invalid uuid + Creating a clone of an private Island Library by admin whose owner is not admin user """""" - params = {""uuid"": ""invalid""} - r = run_api.ideploy_list(params) + ilib_id = custom_ilib_non_admin_operations + clone = { + ""name"": ""test_clone"", + ""description"": ""cloning private island by admin whose owner is not admin"" + } + params, r = run_api.ilibrary_clone_island(ilib_id, params=clone) test_assert.status(r, 200) + rjson = r.json() + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) " -/ideploy/rest/list/,successfully fetching the list of deployed islands using invalid name,"{""name"": ""invalid""}","{ -""status"":200, -""response"": list of deployed islands -}"," -def test_ideploy_list_invalid_name(run_api): - params = {""name"": ""invalid""} - r = run_api.ideploy_list(params) - test_assert.status(r, 200) +/ilibrary/rest/delete/{UUID}/,deleting island library without Authorization,"{ + uuid = 'valid-island-library-uuid' +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided."" +}","def test_ilib_delete_without_authentication(anonymous_exec_api): + """""" + Delete Island without authorization + """""" + r = anonymous_exec_api.ilibrary_delete(""valid-island-uuid"", {}) + test_assert.status(r, 401) + res = r.json() + assert res['detail'] == 'Authentication credentials were not provided.' 
"
-/ideploy/rest/pause/{UUID}/,Check for the transition of state from Running to Paused,,Working as intended,"
-def test_ideploy_check_from_running_to_paused(run_api, ideploy_start):
+/ilibrary/rest/delete/{UUID}/,deleting island library when requested with invalid token,"{
+ uuid = 'invalid-island-library-uuid'
+}","{
+""status"" : 401,
+""message"" : ""Invalid token""
+}","def test_ilib_delete_invalid_token(invalid_exec_api):
+    """"""
+    Delete Island with invalid token
+    """"""
+    r = invalid_exec_api.ilibrary_delete(""invalid-island-uuid"", {})
+    test_assert.status(r, 401)
+    res = r.json()
+    assert res['detail'] == 'Invalid token.'
+"
+/ilibrary/rest/delete/{UUID}/,deleting island library when Island UUID does not exist,"{
+ uuid = 'invalid-island-library-uuid'
+}","{
+    ""status"": 404,
+    ""message"": ""Not found""
+}","def test_ilib_delete_invalid_uuid(run_api):
+    """"""
+    Delete Island with invalid uuid
+    """"""
+    r = run_api.ilibrary_delete(""invalid-island-uuid"", {})
+    test_assert.status(r, 404)
+    assert r.json()['detail'] == 'Not found.' 
+" +/ilibrary/rest/delete/{UUID}/,deleting island library for existing valid data,,"{ + ""status"": 204, + ""response"": Island library deleted +}","def test_ilib_delete(ilibrary_delete): + """""" + Deleting the Ilibrary + """""" + r = ilibrary_delete + test_assert.status(r, 204) +" +/ilibrary/rest/delete/{UUID}/,deleting an Island which has next revisions,"{ + uuid = 'valid-existing-island-library-uuid' +}","{ + ""status"": 400, + ""response"": failure +}","def test_ilib_delete_with_next_revision(run_api, ilibrary_add_new_island): """""" - test_ideploy_check_from_running_to_paused + Delete Island which has next revision """""" - x, r = ideploy_start - deploy_id = x[""deploy_uuid""] - res = run_api.ideploy_details(uuid=deploy_id).json() - initial_state = res[""state""] - if not initial_state == ""running"": - assert False, ""The machine is not in running state, current state of machine is %s"" % initial_state - run_api.ideploy_pause(uuid=deploy_id) - result = run_api.ideploy_details(uuid=deploy_id).json() - paused_network_segments = result['island']['network_segments'] - for pause in paused_network_segments: - if pause['name'] not in (""Default Public Segment"", ""HostOnly Segment""): - assert pause['status'] == ""inactive"", ""json |> %s"" % pause - final_state = result[""state""] - assert final_state == ""paused"", ""The error is %s"" % result + template, r = ilibrary_add_new_island + isl_uuid = r.json()[""uuid""] + r = run_api.ideploy_deploy(isl_uuid) + deploy_id = r.json()[""deploy_uuid""] + r, rtask_details = run_api.ideploy_snapshot(deploy_id) + run_api.ideploy_delete(deploy_id) + r = run_api.ilibrary_delete(isl_uuid, {}) + test_assert.status(r, 400) + run_api.ilibrary_delete(rtask_details['result']['snapshotted_island_uuid'], {}) + r = run_api.ilibrary_delete(isl_uuid, {}) " -/ideploy/rest/pause/{UUID}/,Pause a Deployment which you are not an owner of and without Admin rights,,401: UnAuthorized,"@pytest.mark.parametrize(""custom_ilib_admin_operations"", 
PARAMETERS, indirect=True) -def test_ideploy_pause_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): +/ilibrary/rest/delete/{UUID}/,deleting an Island which has existing deployments,"{ + uuid = 'valid-existing-island-library-uuid' +}","{ + ""status"": 400, + ""response"": failure +}","def test_ilib_delete_deployed(run_api, ilibrary_add_new_island): """""" - Pausing the Island by non-admin + Delete Island which has existing deployments """""" - # Non-admin check of Starting a deployment created by different user - deploy_id = custom_ilib_admin_operations - r = run_api.ideploy_pause(deploy_id) - test_assert.status(r, 403) + template, r = ilibrary_add_new_island + isl_uuid = r.json()[""uuid""] + r = run_api.ideploy_deploy(isl_uuid) + deploy_id = r.json()[""deploy_uuid""] + r = run_api.ilibrary_delete(isl_uuid, {}) + test_assert.status(r, 400) + run_api.ideploy_delete(deploy_id) " -/ideploy/rest/pause/{UUID}/,Pause a Deployment which you are not an owner of but with Admin rights,,200 : job created,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) -def test_ideploy_pause_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): +/ilibrary/rest/delete/{UUID}/,deleting a public Island by user with Admin rights but not owner of the library,"{ + uuid = 'valid-existing-island-library-uuid' +}","{ + ""status"": 204, + ""response"": Island library deleted +}","def test_public_ilib_delete_admin(skip_if_not_admin, run_api, non_admin_exec_api): """""" - Pausing the Island by Admin + Deleting the public Ilibrary by Admin """""" - # Admin check of Starting a deployment created by different user - deploy_id = custom_ilib_non_admin_operations - r = run_api.ideploy_pause(deploy_id) - test_assert.status(r, 201) -" -/ideploy/rest/pause/{UUID}/,pausing the deployment of island machine when requested with invalid token,"{ -uid =""invalid"" -} + # Admin check for deleting the public Ilibrary created by different 
user. + networks = template_networks() + params, r_lib = non_admin_exec_api.library_add_new_vm(networks=networks) + rjson_lib = r_lib.json() -","{ - ""status"" : 404, - ""message"" : ""Deployed island doesnt exist."" -}","def test_ideploy_pause_with_invalid_uuid(run_api): + machine = { + ""uuid"": rjson_lib[""uuid""], + ""nic_update_id"": rjson_lib[""hw""][""networks""][0][""id""], + ""nic_delete_id"": rjson_lib[""hw""][""networks""][2][""id""] + } + island = template_add_ilibrary_one_machine(machine=machine) + island['is_public'] = True + params, r_isl = non_admin_exec_api.ilibrary_add_new_island(params=island) + rjson_isl = r_isl.json() + ilib_id = rjson_isl[""uuid""] + r = run_api.ilibrary_delete(ilib_id, {}) + test_assert.status(r, 204) + if 'error' not in rjson_lib.keys(): + uuid = rjson_lib[""uuid""] + non_admin_exec_api.library_delete(uuid, params) +" +/ilibrary/rest/delete/{UUID}/,deleting a public Island by an non-admin user who does not own the library,"{ + uuid = 'valid-existing-island-library-uuid' +}","{ + ""status"": 403, + ""response"": unauthorized +}","def test_public_ilib_delete_non_admin(skip_if_not_non_admin, run_api, admin_exec_api, non_admin_exec_api): """""" - test_ideploy_pause_without_authorization + Deleting the public Ilibrary by Non-Admin """""" - uid = ""invalid"" - r = run_api.ideploy_pause(uuid=uid) - test_assert.status(r, 404) - assert r.json()[""error""] == ""Deployed Island Doesn't Exist"" + # Non-Admin check for deleting the public Ilibrary created by different user. 
+ networks = template_networks() + params, r_lib = admin_exec_api.library_add_new_vm(networks=networks) + rjson_lib = r_lib.json() + + machine = { + ""uuid"": rjson_lib[""uuid""], + ""nic_update_id"": rjson_lib[""hw""][""networks""][0][""id""], + ""nic_delete_id"": rjson_lib[""hw""][""networks""][2][""id""] + } + island = template_add_ilibrary_one_machine(machine=machine) + island['is_public'] = True + params, r_isl = admin_exec_api.ilibrary_add_new_island(params=island) + rjson_isl = r_isl.json() + ilib_id = rjson_isl[""uuid""] + r = run_api.ilibrary_delete(ilib_id, {}) + test_assert.status(r, 403) + if 'error' not in rjson_isl.keys(): + uuid = rjson_isl[""uuid""] + admin_exec_api.ilibrary_delete(uuid, params) + if 'error' not in rjson_lib.keys(): + uuid = rjson_lib[""uuid""] + admin_exec_api.library_delete(uuid, params) + " -/ideploy/rest/pause/{UUID}/,snapshotting the deployed island machine,,"{ -""status"":201 -}","def test_ideploy_pause_self(ideploy_pause): +/ilibrary/rest/delete/{UUID}/,deleting a Private Island by an admin user where the admin does not own the island,"{ + uuid = 'valid-existing-island-library-uuid' +}","{ + ""status"": 204, + ""response"": Island library deleted +}","PARAMETERS = [{""dest_obj"": OBJ_ISL}] + +@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +def test_ilib_delete_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): """""" - Pausing the Island + Deleting the private Ilibrary by Admin """""" - r = ideploy_pause - test_assert.status(r, 201) + # Admin check for deleting the private Ilibrary created by different user. 
+ ilib_id = custom_ilib_non_admin_operations + r = run_api.ilibrary_delete(ilib_id, {}) + test_assert.status(r, 204) " -/ideploy/rest/pause/{UUID}/,pausing the deployment of a deployed island machine when requested with invalid token,"{ -uid =""invalid"" +/ilibrary/rest/delete/{UUID}/,deleting a Island by manager,"{ + uuid = 'valid-existing-island-library-uuid' }","{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_ideploy_pause_with_invalid_token(invalid_exec_api): + ""status"": 403 +}","endpoint = ""ilibrary_delete"" + +PARAMETERS = [{""dest_obj"": OBJ_ISL}] + +@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_ilib_delete_manager(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): """""" - test_ideploy_pause_with_invalid_token + Delete the Ilibrary by Manager """""" - uid = ""Invalid"" - r = invalid_exec_api.ideploy_pause(uuid=uid, wait=False) - test_assert.status(r, 401) - assert r.json()[""detail""] == ""Invalid token."" + # When the user is not part of the group that the manager manages + ilib_id = custom_ilib_admin_operations + r = run_api.ilibrary_delete(ilib_id, {}) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False)) + + # When the user is part of the group that the manager manages + ilib_id = custom_ilib_non_admin_operations + r = run_api.ilibrary_delete(ilib_id, {}) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True)) " -/ideploy/rest/pause/{UUID}/,pausing the deployment of a deployed island machine without authorization,"{ -uid =""invalid"" +/ilibrary/rest/delete/{UUID}/,deleting a Island by an non-admin user who does not own the library,"{ + uuid = 'valid-existing-island-library-uuid' }","{ - ""status"" : 401, - ""message"" : ""Authentication credentials were not provided."" -}","def 
test_ideploy_pause_without_authorization(anonymous_exec_api): + ""status"": 403, + ""response"": unauthorized +}","@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_ilib_delete_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): """""" - test_ideploy_pause_without_authorization + Deleting the private Ilibrary by non-Admin """""" - uid = ""invalid"" - r = anonymous_exec_api.ideploy_pause(uuid=uid, wait=False) - test_assert.status(r, 401) - assert r.json()[""detail""] == ""Authentication credentials were not provided."" + # Non-admin check for deleting the private Ilibrary created by different user. + ilib_id = custom_ilib_admin_operations + r = run_api.ilibrary_delete(ilib_id, {}) + test_assert.status(r, 403) " -/ideploy/rest/pause/{UUID}/,island deployment for a machine from running state to paused state,,"{ -""response"": machine paused -}","def test_ideploy_check_from_running_to_paused(run_api, ideploy_start): +/ilibrary/rest/details/{UUID}/,fetching details of public machines present in private island library,"{ + ""name"": ""Machine1"", + ""is_public"": False, + ""machines"": { + ""add"": [machine1], + }, + }","{ + ""response"" : success +}","def test_ilibrary_details_with_private_island_with_public_machine(run_api): """""" - island deploy from running state to paused state + To check machine type with public island """""" - x, r = ideploy_start - deploy_id = x[""deploy_uuid""] - res = run_api.ideploy_details(uuid=deploy_id).json() - initial_state = res[""state""] - if not initial_state == ""running"": - assert False, ""The machine is not in running state, current state of machine is %s"" % initial_state - run_api.ideploy_pause(uuid=deploy_id) - result = run_api.ideploy_details(uuid=deploy_id).json() - paused_network_segments = result['island']['network_segments'] - for pause in paused_network_segments: - if pause['name'] not in (""Default Public Segment"", ""HostOnly Segment""): - assert 
pause['status'] == ""inactive"", ""json |> %s"" % pause - final_state = result[""state""] - assert final_state == ""paused"", ""The error is %s"" % result + params1, r1 = run_api.library_add_new_vm(networks=networks, is_public=True) + machine1 = { + ""uuid"": r1.json()[""uuid""], + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Default Public Segment"" + }, + + ], + } + + } + params = { + ""name"": ""Machine1"", + ""is_public"": False, + ""machines"": { + ""add"": [machine1], + }, + } + params, r = run_api.ilibrary_add_new_island(params=params) + result = r.json()[""machines""] + island_id = r.json()[""uuid""] + run_api.ilibrary_delete(uuid=island_id) + run_api.library_delete(r1.json()[""uuid""]) + for machine in result: + if machine[""is_public""]: + assert False, ""The machine is still public in private island and the json is %s"" % r.json() + + " -/ideploy/rest/resume/{UUID}/,Resume a Deployment which you are not an owner of and without Admin rights,,403: UnAuthorized,"@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) -def test_ideploy_resume_without_owner_and_admin(skip_if_admin, custom_ilib_admin_operations, run_api): +/ilibrary/rest/details/{UUID}/,fetching details of private island library from public island,"{ + ""name"": ""Machine1"", + ""is_public"": True, + ""machines"": { + ""add"": [machine1], + }, + }","{ + ""response"" : success +}","def test_ilibrary_details_with_edit_public_island_to_private_island(skip_if_not_admin, run_api): """""" - Resume a Deployment which you are not an owner of and without Admin rights + To check machine type with private island """""" - deploy_id = custom_ilib_admin_operations - r = run_api.ideploy_resume(deploy_id) - test_assert.status(r, 403) - rjson = r.json() - assert rjson['error'] == 'You do not have permission to perform this action.', ""json |> %s"" % rjson + params1, r1 = 
run_api.library_add_new_vm(networks=networks) + machine1 = { + ""uuid"": r1.json()[""uuid""], + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Default Public Segment"" + }, + + ], + } + + } + params = { + ""name"": ""Machine1"", + ""is_public"": True, + ""machines"": { + ""add"": [machine1], + }, + } + params, r = run_api.ilibrary_add_new_island(params=params) + island_id = r.json()[""uuid""] + params, r = run_api.ilibrary_edit_island(uuid=island_id, params={""is_public"": False}) + res = r.json()[""machines""] + run_api.ilibrary_delete(uuid=island_id) + run_api.library_delete(r1.json()[""uuid""]) + for machine in res: + if machine[""is_public""]: + assert False, ""The json is %s"" % r.json() " -/ideploy/rest/resume/{UUID}/,Resume a Deployment which you are not an owner of but with Admin rights,,201 : job created," -@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) -def test_ideploy_resume_with_admin_rights(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): +/ilibrary/rest/details/{UUID}/,fetching details of island library without Authorization,"{ + uid = ""valid"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided."" +}","def test_ilibrary_details_without_authorization(anonymous_exec_api): """""" - Resume a Deployment which you are not an owner of but with Admin rights + Details of Ilibrary without authorization """""" - deploy_id = custom_ilib_non_admin_operations - r = run_api.ideploy_resume(deploy_id) - test_assert.status(r, 201) + uid = ""valid"" + r = anonymous_exec_api.ilibrary_details(uuid=uid) + result = r.json() + test_assert.status(r, 401) + assert result['detail'] == ""Authentication credentials were not provided."" " -/ideploy/rest/resume/{UUID}/,resuming the deployment of island machine when requested with invalid token,"{ -uid =""invalid"" -} 
+/ilibrary/rest/details/{UUID}/,fetching details of island library with no NIC and island type is private,"{ + ""name"": ""Machine1"", + ""is_public"": True, + ""machines"": { + ""add"": [machine1, machine2, machine3], + }, -","{ - ""status"" : 404, - ""message"" : ""Deployed island doesnt exist."" -}","def test_ideploy_resume_invalid_uuid(run_api): - """""" - When provided with invalid uuid - """""" - uuid = ""invalid"" - r = run_api.ideploy_resume(uuid) - test_assert.status(r, 404) - rjson = r.json() - assert rjson[""error""] == ""Deployed Island Doesn't Exist"", 'The error is %s' % rjson[""error""] -" -/ideploy/rest/resume/{UUID}/,snapshotting the deployed island machine,,"{ -""status"":201 -}","def test_ideploy_resume(ideploy_resume): + }","{ + ""response"" : success +}","def test_ilibrary_details_with_island_type_Zero_NIC(run_api, library_add_three_vm): """""" - When provided with valid uuid + Detail of island_type when all machines have assigned with No NIC's and island type is private """""" - r = ideploy_resume - test_assert.status(r, 201) + r1, r2, r3 = library_add_three_vm + machine1 = { + ""uuid"": r1.json()[""uuid""], + + } + machine2 = { + ""uuid"": r2.json()[""uuid""], + + } + machine3 = { + ""uuid"": r3.json()[""uuid""], + + } + params = { + ""name"": ""Machine1"", + ""is_public"": True, + ""machines"": { + ""add"": [machine1, machine2, machine3], + }, + + } + + params, r = run_api.ilibrary_add_new_island(params=params) + island_type = r.json()[""island_type""] + island_id = r.json()[""uuid""] + run_api.ilibrary_delete(uuid=island_id) + assert island_type == ""private"", ""The json is %s"" % r.json() " -/ideploy/rest/resume/{UUID}/,resuming the deployment of a deployed island machine when requested with invalid token,"{ -uuid =""invalid"" +/ilibrary/rest/details/{UUID}/,fetching details of island library with invalid token,"{ + uid = ""invalid"" }","{ ""status"" : 401, ""message"" : ""Invalid token"" -}","def 
test_ideploy_resume_invalid_token(invalid_exec_api): +}","def test_ilibrary_details_with_invalid_token(invalid_exec_api): """""" - When provided with invalid token + Details of Ilibrary with invalid token """""" - uuid = ""invalid"" - r = invalid_exec_api.ideploy_resume(uuid, wait=False) + uid = ""invalid"" + r = invalid_exec_api.ilibrary_details(uuid=uid) + result = r.json() test_assert.status(r, 401) - rjson = r.json() - assert rjson['detail'] == ""Invalid token."", ""The error message is %s"" % rjson['detail'] + assert result['detail'] == ""Invalid token."" " -/ideploy/rest/resume/{UUID}/,resuming the deployment of a deployed island machine without authorization,"{ -uuid =""invalid"" +/ilibrary/rest/details/{UUID}/,fetching details of island library where island type is set to public,"{ + ""name"": ""Machine1"", + ""is_public"": True, + ""machines"": { + ""add"": [machine1, machine2, machine3], + }, }","{ - ""status"" : 401, - ""message"" : ""Authentication credentials were not provided."" -}","def test_ideploy_resume_without_authorization(anonymous_exec_api): + ""response"" : success +}","def test_ilibrary_details_with_island_type_public(run_api, library_add_three_vm): """""" - When provided without authorization + Detail of island_type when all machines have NIC as Default Public Segment """""" - uuid = ""invalid"" - r = anonymous_exec_api.ideploy_resume(uuid, wait=False) - test_assert.status(r, 401) - rjson = r.json() - assert rjson['detail'] == ""Authentication credentials were not provided."", ""The error message is %s"" % rjson['detail'] + r1, r2, r3 = library_add_three_vm + machine1 = { + ""uuid"": r1.json()[""uuid""], + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Default Public Segment"" + } + ], + } -" -/ideploy/rest/resume/{UUID}/,successful island deployment for a machine from paused state to running state,,"{ -""response"": machine paused -}","def 
test_ideploy_resume_checking_state_paused_to_running(ideploy_start, run_api): - """""" - Check for the transition of state from Paused to Running - """""" - res, r = ideploy_start - deploy_id = res[""deploy_uuid""] - run_api.ideploy_pause(deploy_id) - paused_r = run_api.ideploy_details(deploy_id) - paused_rjson = paused_r.json() - assert paused_rjson['state'] == 'paused', ""json |> %s"" % paused_rjson - run_api.ideploy_resume(deploy_id) - resume_r = run_api.ideploy_details(deploy_id) - resume_rjson = resume_r.json() - assert resume_rjson['state'] == 'running', ""json |> %s"" % resume_rjson -" -/ideploy/rest/resume/{UUID}/,island deployment for a machine from paused state to running state by a manager who does not have permissions over the server,,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) -@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) -def test_ideploy_resume_by_manager_without_server_right(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): - """""" - Ideploy resume by manager without server right - """""" - # When Manager manages the user but not the server - deploy_id = custom_ilib_non_admin_operations - r = run_api.ideploy_resume(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) + } + machine2 = { + ""uuid"": r2.json()[""uuid""], + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Default Public Segment"" + } + ], + } - # when manager does not manage the user nor the server - deploy_id = custom_ilib_admin_operations - r = run_api.ideploy_resume(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) -" -/ideploy/rest/resume/{UUID}/,starting the segment of the island by manager when he have rights 
over the server,,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -def test_ideploy_segment_start_manager_server_right(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): - """""" - Starting the segment of the Island by manager when have right on server - """""" - # When the user is not part of the group that the manager manages - seg_id = custom_ilib_admin_operations - r = run_api.ideploy_segment_start(seg_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) - run_api.ideploy_segment_stop(seg_id) + } + machine3 = { + ""uuid"": r3.json()[""uuid""], + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Default Public Segment"" + } + ], + } + + } + params = { + ""name"": ""Machine1"", + ""is_public"": True, + ""machines"": { + ""add"": [machine1, machine2, machine3], + }, + } - # When the user is part of the group that the manager manages and deployment is on manager rights to server - seg_id = custom_ilib_non_admin_operations - r = run_api.ideploy_segment_start(seg_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) - run_api.ideploy_segment_stop(seg_id) + params, r = run_api.ilibrary_add_new_island(params=params) + island_type = r.json()[""island_type""] + island_id = r.json()[""uuid""] + run_api.ilibrary_delete(uuid=island_id) + assert island_type == ""public"", ""The json is %s"" % r.json() " -/ideploy/rest/segment_start/,starting the segment of the island by manager when he does not have right over the server,,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) -@pytest.mark.parametrize(""custom_ilib_admin_operations"", 
PARAMETERS_NO_SRV_RIGHT, indirect=True) -def test_ideploy_segment_start_manager_no_server_right(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): +/ilibrary/rest/details/{UUID}/,fetching details of island library when private machine is added to public island,"{ + ""name"": ""Machine1"", + ""is_public"": True, + ""machines"": { + ""add"": [machine1], + }, + }","{ + ""response"" : success +}","def test_ilibrary_details_with_public_island_with_private_machine(run_api): """""" - Starting the segment of the Island by manager when have no right on server + To check machine type when Private machine is added to public island """""" - # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server - seg_id = custom_ilib_admin_operations - r = run_api.ideploy_segment_start(seg_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) - run_api.ideploy_segment_stop(seg_id) + params1, r1 = run_api.library_add_new_vm(networks=networks, is_public=False) + machine1 = { + ""uuid"": r1.json()[""uuid""], + ""is_public"": False, + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Default Public Segment"" + }, + + ], + } + + } + params = { + ""name"": ""Machine1"", + ""is_public"": True, + ""machines"": { + ""add"": [machine1], + }, + } + params, r = run_api.ilibrary_add_new_island(params=params) + result = r.json()[""machines""] + island_id = r.json()[""uuid""] + run_api.ilibrary_delete(uuid=island_id) + run_api.library_delete(r1.json()[""uuid""]) + for machine in result: + if not machine[""is_public""]: + assert False, ""The machine is still private in public island and the json is %s"" % r.json() - # When the user is part of the group that the manager manages but the deployment is not on manager rightful server - seg_id = 
custom_ilib_non_admin_operations - r = run_api.ideploy_segment_start(seg_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) - run_api.ideploy_segment_stop(seg_id) -" -/ideploy/rest/segment_stop/,stopping the segment of an island by admin user,,"{ -""status"" : 201, -""response"": segment stopped -}","@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) -def test_ideploy_segment_stop_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): - """""" - Stopping the segment of an Island by Admin - """""" - # Admin check of Stopping a deployment created by different user - seg_id = custom_ilib_non_admin_operations - r = run_api.ideploy_segment_stop(seg_id) - test_assert.status(r, 201) -" -/ideploy/rest/segment_stop/,stopping the segment of an island by a non-admin user,,"{ -""status"" : 403 -}","@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) -def test_ideploy_segment_stop_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): - """""" - Stopping the segment of an Island by non-admin - """""" - # Non-admin check of Stopping a deployment createdan different user - seg_id = custom_ilib_admin_operations - r = run_api.ideploy_segment_stop(seg_id) - test_assert.status(r, 403) " -/ideploy/rest/segment_stop/,"stopping the segment of an island machine by a manager type of user, where the manager have rights over the servers",,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -def test_ideploy_segment_stop_manager_server_right(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): +/ilibrary/rest/details/{UUID}/,fetching details of island library when island has one machine nic as Default and other machine nic as empty and island type is 
partial,"{ + ""name"": ""Machine1"", + ""is_public"": True, + ""machines"": { + ""add"": [machine1, machine2, machine3], + }, + + }","{ + ""response"" : success +}","def test_ilibrary_details_with_island_type_partial(run_api, library_add_three_vm): """""" - Stopping the segment of an Island by manager when have right on server + Detail of island_type when island has one machine nic as Default and other machine nic as empty and island type is partial """""" - # When the user is not part of the group that the manager manages - seg_id = custom_ilib_admin_operations - r = run_api.ideploy_segment_stop(seg_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + r1, r2, r3 = library_add_three_vm + machine1 = { + ""uuid"": r1.json()[""uuid""], + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Default Public Segment"" + } + ], + } - # When the user is part of the group that the manager manages and deployment is on manager rights to server - seg_id = custom_ilib_non_admin_operations - r = run_api.ideploy_segment_stop(seg_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) + } + machine2 = { + ""uuid"": r2.json()[""uuid""], + + } + machine3 = { + ""uuid"": r3.json()[""uuid""], + + } + params = { + ""name"": ""Machine1"", + ""is_public"": True, + ""machines"": { + ""add"": [machine1, machine2, machine3], + }, + + } + params, r = run_api.ilibrary_add_new_island(params=params) + island_type = r.json()[""island_type""] + island_id = r.json()[""uuid""] + run_api.ilibrary_delete(uuid=island_id) + assert island_type == ""partial"", ""The json is %s"" % r.json() " -/ideploy/rest/segment_stop/,"stopping the segment of an island machine by a manager type of user, where the manager do not have rights over the servers",,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", 
PARAMETERS_NO_SRV_RIGHT, indirect=True) -@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) -def test_ideploy_segment_stop_manager_no_server_right(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): +/ilibrary/rest/details/{UUID}/,fetching details of island library when all machines are assigned with multiple NICs and island type is public,"{ + ""name"": ""Machine1"", + ""is_public"": True, + ""machines"": { + ""add"": [machine1, machine2, machine3], + }, + ""network_segments"": { + ""add"": [ + { + ""name"": ""Seg1"", + ""description"": ""string"", + ""enable_ipv4"": False + }, + { + ""name"": ""Seg2"", + ""description"": ""string"", + ""enable_ipv4"": False + }, + { + ""name"": ""Seg3"", + ""description"": ""string"", + ""enable_ipv4"": False + } + ] + } + }","{ + ""response"" : success +}","def test_ilibrary_details_with_island_type_public_with_three_segement(run_api, library_add_three_vm): """""" - Stopping the segment of an Island by manager when have no right on server + Detail of island_type when island all machines are assigned with multiple NICs and island type is public """""" - # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server - seg_id = custom_ilib_admin_operations - r = run_api.ideploy_segment_stop(seg_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + r1, r2, r3 = library_add_three_vm + machine1 = { + ""uuid"": r1.json()[""uuid""], + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Default Public Segment"" + }, + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Seg1"" + } + ], + } + + } + machine2 = { + ""uuid"": r2.json()[""uuid""], + ""nics"": { + ""add"": [ + { + ""mac"": 
""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Seg1"" + }, + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Seg2"" + } + ], + } + + } + machine3 = { + ""uuid"": r3.json()[""uuid""], + ""nics"": { + ""add"": [ + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Seg2"" + }, + { + ""mac"": ""auto"", + ""type"": ""bridge"", + ""model"": networks[0].get(""model"", ""virtio""), + ""segment"": ""Seg3"" + } + ], + } + + } + params = { + ""name"": ""Machine1"", + ""is_public"": True, + ""machines"": { + ""add"": [machine1, machine2, machine3], + }, + ""network_segments"": { + ""add"": [ + { + ""name"": ""Seg1"", + ""description"": ""string"", + ""enable_ipv4"": False + }, + { + ""name"": ""Seg2"", + ""description"": ""string"", + ""enable_ipv4"": False + }, + { + ""name"": ""Seg3"", + ""description"": ""string"", + ""enable_ipv4"": False + } + ] + } - # When the user is part of the group that the manager manages but the deployment is not on manager rightful server - seg_id = custom_ilib_non_admin_operations - r = run_api.ideploy_segment_stop(seg_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) + } + params, r = run_api.ilibrary_add_new_island(params=params) + island_type = r.json()[""island_type""] + island_id = r.json()[""uuid""] + run_api.ilibrary_delete(uuid=island_id) + assert island_type == ""public"", ""The json is %s"" % r.json() " -/ideploy/rest/segment_stop/,"stopping the segment of an island machine by a manager type of user, where the manager do not have rights over the servers",,"{ -""status"" : 201, -""response"": segment stopped -}","def test_ideploy_segment_stop_valid_uuid(run_api, ideploy_details): +/ilibrary/rest/details/{UUID}/,fetching details of island library provided with valid UUID,"{ +ilib_id +}","{ + 
""status"": 200, + ""response"" : success +}","def test_ilibrary_details_with_valid_uuid(run_api, ilibrary_add_new_island): """""" - Stopping the segment of the Island + Details of Ilibrary with valid uuid """""" - param, result = ideploy_details - seg_id = result.json()[""island""][""network_segments""][2][""uuid""] - r = run_api.ideploy_segment_start(seg_id) - r = run_api.ideploy_segment_stop(seg_id) - test_assert.status(r, 201) + params, r = ilibrary_add_new_island + lib_uuid = r.json()[""uuid""] + result = run_api.ilibrary_list_island(params={""uuid"": lib_uuid}) + x = result.json() + test_assert.status(result, 200) + for island_lib in x['results']: + assert island_lib['uuid'] == lib_uuid, ""Json is %s"" % x " -/ideploy/rest/segment_stop/,stopping the segment of a island machine using invalid deployment uuid,"{ -seg_id = ""invalid"" +/ilibrary/rest/details/{UUID}/,fetching details of island library provided with invalid UUID,"{ + uid = ""invalid"" }","{ -""status"" : 404, -""response"": Failure -}","def test_ideploy_segment_stop_invalid_uuid(run_api): + ""status"": 404, + ""message"": ""Not Found"" +}","def test_ilibrary_details_with_invalid_uuid(run_api): """""" - Stopping the segment of the Island + Details of Ilibrary with invalid uuid """""" - seg_id = ""invalid"" - r = run_api.ideploy_segment_stop(seg_id) + uid = ""invalid"" + r = run_api.ilibrary_details(uuid=uid) test_assert.status(r, 404) " -/ideploy/rest/segment_stop/,stopping the segment of a island machine without authorization,"{ -seg_id = ""invalid"" +/ilibrary/rest/details/{UUID}/,fetching details of island library by non-admin user,"{ +lib_id }","{ -""status"" : 401, -""message"" : ""Authentication credentials were not provided."" + ""status"": 403, + ""message"": ""You do not have permission to perform this action."" +}","PARAMETERS = [{""dest_obj"": OBJ_ISL}] -}","def test_ideploy_segment_stop_without_authorization(anonymous_exec_api): +@pytest.mark.parametrize(""custom_ilib_admin_operations"", 
PARAMETERS, indirect=True) +def test_ilibrary_details_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): """""" - Stopping the segment of the Island without authorization + Details of Ilibrary by non-Admin """""" - seg_id = ""invalid"" - seg_stop = anonymous_exec_api.ideploy_segment_stop(seg_id, wait=False) - seg_json = seg_stop.json() - test_assert.status(seg_stop, 401) - assert seg_json[""detail""] == ""Authentication credentials were not provided."" + # Non-admin check for fetching details of the Ilibrary created by different user. + ilibrary_id = custom_ilib_admin_operations + r = run_api.ilibrary_details(ilibrary_id) + test_assert.status(r, 403) + assert r.json()[""error""] == ""You do not have permission to perform this action."" " -/ideploy/rest/segment_stop/,stopping the segment of a island machine using invalid token,"{ -seg_id = ""invalid"" -}","{ -""status"" : 401, -""message"" : ""Invalid token."" +/ilibrary/rest/details/{UUID}/,fetching details of island library by manager,"{ +ilib_id +}",,"endpoint = ""ilibrary_details"" +networks = template_networks() -}","def test_ideploy_segment_stop_invalid_token(invalid_exec_api): +PARAMETERS = [{""dest_obj"": OBJ_ISL}] +@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_ilibrary_details_manager(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): """""" - Stopping the segment of the Island using invalid token + Details of Ilibrary by Manager """""" - seg_id = ""invalid"" - seg_stop = invalid_exec_api.ideploy_segment_stop(seg_id, wait=False) - seg_json = seg_stop.json() - test_assert.status(seg_stop, 401) - assert seg_json[""detail""] == ""Invalid token."" -" -/ideploy/rest/segment_stop/,"stopping the segment of a island machine, where the segment is non-deployable. 
check the user type before performing the operation, only admin user type have the permission to perform such operations.",,"{ -""status"" : 400, -""message"" : ""No operation is allowed on the segment, as it is part of the library"""" + # When the user is not part of the group that the manager manages + ilibrary_id = custom_ilib_admin_operations + r = run_api.ilibrary_details(ilibrary_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False)) -}","def test_ideploy_stop_non_deployment_segment(ilibrary_details, run_api): + # When the user is part of the group that the manager manages + ilibrary_id = custom_ilib_non_admin_operations + r = run_api.ilibrary_details(ilibrary_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True)) +" +/ilibrary/rest/details/{UUID}/,fetching details of island library by admin user,"{ +ilib_id +}","{ + ""status"": 200, + ""response"": success +}","@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +def test_ilibrary_details_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): """""" - Stop a Segment which is part of Island (not Deployment) + Details of Ilibrary by Admin + """""" + # Admin check for fetching details of the Ilibrary created by different user. 
+ ilibrary_id = custom_ilib_non_admin_operations + r = run_api.ilibrary_details(ilibrary_id) + test_assert.status(r, 200) +" +/ilibrary/rest/details/{UUID}/,fetching details of island library,,"{ + ""status"": 200, + ""response"" : success +}","def test_ilibrary_details(ilibrary_details): + """""" + Getting Ilibrary details """""" r = ilibrary_details + test_assert.status(r, 200) +" +/ilibrary/rest/edit/{UUID}/,updating two segments giving same name,"params = { + ""name"": ""test_ilibrary"", + ""is_public"": True, + 'network_segments': { + 'add': [ + { + 'name': 'network1' + }, + { + 'name': 'network2' + } + ] + }, + } +params1 = { + 'network_segments': { + 'update': [ + { + 'uuid': seg_id, + 'name': 'network2' + } + ] + }, + ""is_public"": True + }","{ + ""status"": 400, + ""message"": ""The segment name(s) is/are already taken for other island"" +}","def test_ilibrary_edit_update_segment_with_existing_name(run_api): + """""" + Editing an Island Library segment with existing name + """""" + params = { + ""name"": ""test_ilibrary"", + ""is_public"": True, + 'network_segments': { + 'add': [ + { + 'name': 'network1' + }, + { + 'name': 'network2' + } + ] + }, + } + params, r = run_api.ilibrary_add_new_island(params=params) rjson = r.json() - uuid = rjson['network_segments'][2]['uuid'] - res = run_api.ideploy_segment_start(uuid) - res = run_api.ideploy_segment_stop(uuid) - if run_api.user_type == USER_TYPE['non_admin']: - test_assert.status(res, 403) - result = res.json() - assert result['error'] == ""You do not have permission to perform this action."" - else: - test_assert.status(res, 400) - result = res.json() - assert result['error'] == f""No operation is allowed on {rjson['network_segments'][2]['name']} , as it is part of the library"" + uuid = rjson[""uuid""] + segment_list = rjson['network_segments'] + for segment in segment_list: + if segment['name'] == 'network1': + seg_id = segment['uuid'] + params1 = { + 'network_segments': { + 'update': [ + { + 'uuid': 
seg_id, + 'name': 'network2' + } + ] + }, + ""is_public"": True + } + params, r = run_api.ilibrary_edit_island(uuid, params=params1) + test_assert.status(r, 400) + res = r.json() + assert res['error'] == ""The segment name(s) {'network2'} is/are already taken for the island"" + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) " -/ideploy/rest/segment_stop/,"stopping the segment of a island machine by changing the state of the machine from ""running"" to ""mixed"", where the island is already in running state","{ - ""machine_list"": machine_uuids, - ""op"": ""poweroff"" -}","{ -""response"" :operation successful -}","def test_ideploy_segment_stop_check_state_running_to_mixed(run_api, ideploy_start): +/ilibrary/rest/edit/{UUID}/,updating two segment with same name,"params = { + ""name"": ""test_ilibrary"", + ""is_public"": True, + 'network_segments': { + 'add': [ + { + 'name': 'network1' + }, + { + 'name': 'network2' + } + ] + } + + } +params1 = { + 'network_segments': { + 'update': [ + { + 'uuid': seg1_id, + 'name': 'network3' + }, + { + 'uuid': seg2_id, + 'name': 'network3' + } + ] + }, + ""is_public"": True + }","{ + ""status"": 400, + ""message"": ""Segment name should be unique for an island"" +}","def test_ilibrary_edit_update_two_segment_same_name(run_api): """""" - Check for the transition of state from Running to Mixed (if Island state was Running) + Editing an Island Library Update Segment with same name """""" - x, r = ideploy_start - deploy_id = x[""deploy_uuid""] - result = run_api.ideploy_details(deploy_id) - assert result.json()['state'] == ""running"", ""The error is %s"" % (result.json()['state']) + params = { + ""name"": ""test_ilibrary"", + ""is_public"": True, + 'network_segments': { + 'add': [ + { + 'name': 'network1' + }, + { + 'name': 'network2' + } + ] + } - machine_uuids = [mc[""uuid""] for mc in result.json()[""machines""]] - deploy_bulkops_params = { - ""machine_list"": machine_uuids, - 
""op"": ""poweroff"" } - run_api.deploy_bulkops(deploy_bulkops_params) - seg_ids = [segment[""uuid""] for segment in result.json()[""island""][""network_segments""]][2:4] - run_api.ideploy_segment_start(seg_ids[0]) - run_api.ideploy_segment_start(seg_ids[1]) - run_api.ideploy_segment_stop(seg_ids[0]) - r = run_api.ideploy_details(deploy_id) + params, r = run_api.ilibrary_add_new_island(params=params) rjson = r.json() - assert rjson['state'] == ""mixed"", ""The error is %s"" % (rjson) + uuid = rjson[""uuid""] + segment_list = rjson['network_segments'] + + for segment in segment_list: + if segment['name'] == 'network1': + seg1_id = segment['uuid'] + if segment['name'] == 'network2': + seg2_id = segment['uuid'] + params1 = { + 'network_segments': { + 'update': [ + { + 'uuid': seg1_id, + 'name': 'network3' + }, + { + 'uuid': seg2_id, + 'name': 'network3' + } + ] + }, + ""is_public"": True + } + params, r = run_api.ilibrary_edit_island(uuid, params=params1) + test_assert.status(r, 400) + res = r.json() + assert res['error'] == ""Segment name should be unique for an island"" + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) " -/ideploy/rest/segment_stop/,"stopping the segment of a island machine by changing the state of the machine from ""stopped"" to ""stopped"", where the island state and segment state is already in ""stopped"" state",,"{ -""response"" :operation successful -}","def test_ideploy_segment_stop_check_state_of_segments(ideploy_details, run_api): +/ilibrary/rest/edit/{UUID}/,updating the same segment twice in a single API,"params = { + ""name"": ""test_ilibrary"", + ""is_public"": True, + 'network_segments': { + 'add': [ + { + 'name': 'network1' + } + ] + } + } +params1 = { + 'network_segments': { + 'update': [ + { + 'uuid': seg_id, + 'name': 'network2' + }, + { + 'uuid': seg_id, + 'name': 'network2' + } + ] + }, + ""is_public"": True + }","{ + ""status"": 400, + ""message"": ""Segment name should be unique 
for an island"" +}","def test_ilibrary_edit_update_same_segment_twice(run_api): """""" - Check for the transition of state from Stopped to Stopped (if Island state was Stopped and all Segments are Stopped) + Editing an Island Library Update same Segment twice in a single API """""" - param, result = ideploy_details - deploy_id = param[""deploy_uuid""] + params = { + ""name"": ""test_ilibrary"", + ""is_public"": True, + 'network_segments': { + 'add': [ + { + 'name': 'network1' + } + ] + } - seg_id = result.json()[""island""][""network_segments""][2][""uuid""] - result = run_api.ideploy_segment_stop(seg_id) - result = run_api.ideploy_details(deploy_id) - rjson = result.json() - segments = [segment for segment in rjson[""island""][""network_segments""]][2:4] + } + params, r = run_api.ilibrary_add_new_island(params=params) + rjson = r.json() + uuid = rjson[""uuid""] + segment_list = rjson['network_segments'] - assert rjson['state'] == 'stopped', ""The error is %s"" % rjson - machines = rjson['machines'] - for machine in machines: - assert machine['state'] == 'stopped', ""The error is %s"" % (machine) - for segment in segments: - assert segment['status'] == 'inactive', ""The error is %s"" % (segment) -" -/ideploy/rest/segment/start/{UUID}/,starting the segment of deployed island machine,,"{ -""status"":201 -}","def test_ideploy_start_self(ideploy_start): - """""" - Start the Island - """""" - x, r = ideploy_start - test_assert.status(r, 201) -" -/ideploy/rest/segment/start/{UUID}/,starting a segment of a deployed island machine when requested with invalid token,"{ -uid =""invalid"" -}","{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_ideploy_segment_start_with_invalid_token(invalid_exec_api): - """""" - test_ideploy_with_invalid_token - """""" - uid = ""invalid"" - r = invalid_exec_api.ideploy_start(uuid=uid, wait=False) - test_assert.status(r, 401) - assert r.json()[""detail""] == ""Invalid token."" -" -/ideploy/rest/segment/start/{UUID}/,starting 
the segment of a deployed island machine where the segment UUID does not exist,"{ -uid =""invalid"" -}","{ - ""status"" : 404, - ""message"" : ""Deployed island doesnt exist."" -}"," -def test_ideploy_serment_start_with_invalid_uuid(run_api): - """""" - segment UUID does not exist - """""" - uid = ""invalid"" - r = run_api.ideploy_start(uuid=uid) - test_assert.status(r, 404) - assert r.json()[""error""] == ""Deployed Island Doesn't Exist"" -" -/ideploy/rest/segment/start/{UUID}/,starting a segment of a deployed island machine without Authorization,"{ -uid =""invalid"" -}","{ - ""status"" : 401, - ""message"" : ""Authentication credentials were not provided."" -}","def test_ideploy_segment_start_without_authorization(anonymous_exec_api): - """""" - test_ideploy_without_authorization - """""" - uid = ""invalid"" - r = anonymous_exec_api.ideploy_start(uuid=uid, wait=False) - test_assert.status(r, 401) - assert r.json()[""detail""] == ""Authentication credentials were not provided."" -" -/ideploy/rest/segment/start/{UUID}/,"starting island deployment for all segments of an island machine -","machine1 = { - ""uuid"": r1.json()[""uuid""], - ""nics"": { - ""add"": [ + for segment in segment_list: + if segment['name'] == 'network1': + seg_id = segment['uuid'] + + params = { + 'network_segments': { + 'update': [ { - ""mac"": ""auto"", - ""type"": ""bridge"", - ""model"": networks[0].get(""model"", ""virtio""), - ""segment"": ""Default Public Segment"" + 'uuid': seg_id, + 'name': 'network2' }, { - ""mac"": ""auto"", - ""type"": ""bridge"", - ""model"": networks[0].get(""model"", ""virtio""), - ""segment"": ""Seg1"" + 'uuid': seg_id, + 'name': 'network2' } - ], - } - + ] + }, + ""is_public"": True } -params = { - ""name"": ""Machine1"", - ""is_public"": True, + params, r = run_api.ilibrary_edit_island(uuid, params=params1) + test_assert.status(r, 400) + res = r.json() + assert res['error'] == ""Segment name should be unique for an island"" + if 'error' not in rjson.keys(): + 
uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) + +" +/ilibrary/rest/edit/{UUID}/,updating the same machine more than once,"params3 = { + ""name"": ""test_ilibrary"", ""machines"": { - ""add"": [machine1], - }, - ""network_segments"": { ""add"": [ { - ""name"": ""Seg1"", - ""description"": ""string"", - ""enable_ipv4"": False - }, + ""uuid"": r1.json()[""uuid""] + } + ] + }, + ""is_public"": False + } +params3 = { + ""machines"": { + ""update"": [ { - ""name"": ""Seg2"", - ""description"": ""string"", - ""enable_ipv4"": False + ""uuid"": machine_uuid, + 'description': 'description' }, + { + ""uuid"": machine_uuid, + 'description': 'desc' + } ] - } + }, + ""is_public"": False }","{ -""response"": operation successful -}","def test_ideploy_deployment_starts_all_segment(run_api): + ""status"": 400, + ""message"": ""Updating the same machine more than once may result in an Unexpected value change. Hence, Aborting..."" +}","def test_ilibrary_edit_update_same_machine_twice(run_api): """""" - starting island deployment for all segments + Editing an Island Library by updating same machine twice """""" networks = template_networks() - params1, r1 = run_api.library_add_new_vm(networks=networks) - machine1 = { - ""uuid"": r1.json()[""uuid""], - ""nics"": { + if run_api.arch_type == ""aarch64"": + params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + else: + params1, r1 = run_api.library_add_new_vm(networks=networks) + + params3 = { + ""name"": ""test_ilibrary"", + ""machines"": { ""add"": [ { - ""mac"": ""auto"", - ""type"": ""bridge"", - ""model"": networks[0].get(""model"", ""virtio""), - ""segment"": ""Default Public Segment"" - }, - { - ""mac"": ""auto"", - ""type"": ""bridge"", - ""model"": networks[0].get(""model"", ""virtio""), - ""segment"": ""Seg1"" + ""uuid"": r1.json()[""uuid""] } - ], - } - + ] + }, + ""is_public"": False } - params = { - ""name"": ""Machine1"", - ""is_public"": True, + 
params3, r3 = run_api.ilibrary_add_new_island(params=params3) + rjson3 = r3.json() + uuid = rjson3['uuid'] + machine_uuid = rjson3['machines'][0]['uuid'] + + params3 = { ""machines"": { - ""add"": [machine1], - }, - ""network_segments"": { - ""add"": [ + ""update"": [ { - ""name"": ""Seg1"", - ""description"": ""string"", - ""enable_ipv4"": False + ""uuid"": machine_uuid, + 'description': 'description' }, { - ""name"": ""Seg2"", - ""description"": ""string"", - ""enable_ipv4"": False - }, + ""uuid"": machine_uuid, + 'description': 'desc' + } ] - } - } - params, r = run_api.ilibrary_add_new_island(params=params) - island_uuid = r.json()[""uuid""] - res = run_api.ideploy_deploy(uuid=island_uuid) - deploy_uuid = res.json()[""deploy_uuid""] - run_api.ideploy_start(deploy_uuid) - r_details = run_api.ideploy_details(deploy_uuid) - result = r_details.json() - segment_list = result[""island""][""network_segments""] - for segment in segment_list: - if segment[""status""] != ""active"": - assert False, ""The error is %s"" % result - machine_uuids = [mc[""uuid""] for mc in r_details.json()[""machines""]] - deploy_bulkops_params = { - ""machine_list"": machine_uuids, - ""op"": ""poweroff"" + }, + ""is_public"": False } - run_api.deploy_bulkops(deploy_bulkops_params) - run_api.ideploy_shutdown(deploy_uuid) - run_api.ideploy_delete(uuid=deploy_uuid) - run_api.ilibrary_delete(uuid=island_uuid) + params, r = run_api.ilibrary_edit_island(uuid, params=params4) + test_assert.status(r, 400) + res = r.json() + assert res['error'] == ""Updating the same machine more than once may result in an Unexpected value change. 
Hence, Aborting..."" + if 'error' not in rjson3.keys(): + run_api.ilibrary_delete(uuid, params3) run_api.library_delete(r1.json()[""uuid""]) -" -/ideploy/rest/segment/start/{UUID}/,starting island deployment an island machine from stopped state to running state,,"{ -""response"": operation successful -}","def test_ideploy_check_from_stopped_to_running(run_api, ideploy_start): - """""" - test_ideploy_check_from_stopped_to_running - """""" - x, r = ideploy_start - deploy_id = x[""deploy_uuid""] - result = run_api.ideploy_details(uuid=deploy_id).json() - final_state = result[""state""] - assert final_state == ""running"", 'The error is %s' % result -" -/ideploy/rest/snapshot/{UUID}/,add description in param,,200: Accepted,"def test_ideploy_snapshot_provided_description(ilibrary_add_new_island, run_api): - """""" - provide description when taking snaphot - """""" - params, r = ilibrary_add_new_island - island_uuid = r.json()[""uuid""] - r = run_api.ideploy_deploy(island_uuid) - deploy_id = r.json()['deploy_uuid'] - description = ""This is Test description"" - r, rtask_details = run_api.ideploy_snapshot(uuid=deploy_id, description=description) - snapshot_id = rtask_details[""result""][""snapshotted_island_uuid""] - test_assert.status(r, 201) - isl_details = run_api.ilibrary_details(snapshot_id).json() - assert isl_details['description'] == description, ""|> Json %s"" % isl_details - run_api.ideploy_delete(uuid=deploy_id) - run_api.ilibrary_delete(uuid=snapshot_id) -" -/ideploy/rest/snapshot/{UUID}/,Check for the transition of state from Stopped to Snapshotting,,,"def test_snapshot_check_from_stop_to_snapshotting(run_api, ilibrary_add_new_island): - """""" - check state transition from stop to snapshotting - """""" - params, r = ilibrary_add_new_island - island_uuid = r.json()[""uuid""] - r = run_api.ideploy_deploy(island_uuid) - deploy_id = r.json()[""deploy_uuid""] - r, current_state = run_api.deployment_snapshot_state_details(id=deploy_id) - assert current_state == 
""snapshotting"", ""Current state is in {}"".format(current_state) - temp = wait_to_complete(run_api, r.json()) - snapshotted_island_uid = temp[""result""][""snapshotted_island_uuid""] - run_api.ideploy_delete(uuid=deploy_id) - run_api.ilibrary_delete(uuid=snapshotted_island_uid) -" -/ideploy/rest/snapshot/{UUID}/,Snapshot a Deployment which you are not an owner of and without Admin rights,,401: UnAuthorized,"@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) -def test_ideploy_snapshot_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): - """""" - Snapshot the Island by non-admin - """""" - # Non-admin check of Starting a deployment created by different user - deploy_id = custom_ilib_admin_operations - r, rtask = run_api.ideploy_snapshot(deploy_id) - test_assert.status(r, 403) -" -/ideploy/rest/snapshot/{UUID}/,Snapshot a Deployment which you are not an owner of but with Admin rights,,200 : job created," -@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) -def test_ideploy_snapshot_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): - """""" - Snapshot the Island by Admin - """""" - # Admin check of Starting a deployment created by different user - deploy_id = custom_ilib_non_admin_operations - r, rtask_details = run_api.ideploy_snapshot(deploy_id) - test_assert.status(r, 201) - run_api.ideploy_delete(deploy_id) - run_api.ilibrary_delete( - rtask_details['result']['snapshotted_island_uuid'], {}) -" -/ideploy/rest/snapshot/{UUID}/,Snapshotting a deployment creates revision in Island,,Working as intended,"def test_ideploy_snapshot_creates_revision(run_api, ideploy_deploy): - """""" - test_ideploy_snapshot_creates_revision - """""" - params, r = ideploy_deploy - deploy_id = r.json()[""deploy_uuid""] - r, rtask_details = run_api.ideploy_snapshot(uuid=deploy_id) - snapshotted_island_uid = rtask_details[""result""][""snapshotted_island_uuid""] - revision_count 
= run_api.ilibrary_details(uuid=snapshotted_island_uid).json()[""revision""] - run_api.ilibrary_delete(uuid=snapshotted_island_uid) - assert revision_count != 1, ""Revision count should not be 1, the error is {}"".format(rtask_details) -" -/ideploy/rest/snapshot/{UUID}/,snapshotting the deployment of island machine when requested with invalid token,"{ -uid =""invalid"" -} -","{ - ""status"" : 404, - ""message"" : ""Deployed island doesnt exist."" -}","def test_ideploy_snapshot_with_invalid_uuid(run_api): - """""" - snapshotting the deployment of island machine when requested with invalid token - """""" - uid = ""invalid"" - r, r_details = run_api.ideploy_snapshot(uuid=uid, wait=False) - test_assert.status(r, 404) - assert r.json()[""error""] == ""Deployed Island Doesn't Exist"" -" -/ideploy/rest/snapshot/{UUID}/,snapshotting the deployed island machine,,"{ -""status"":201 -}","def test_ideploy_snapshot_self(ideploy_snapshot): - """""" - Snapshot the Island - """""" - r = ideploy_snapshot - test_assert.status(r, 201) -" -/ideploy/rest/snapshot/{UUID}/,taking island of a deployed island mahcine without authorization,"{ -uid =""invalid"" -}","{ - ""status"" : 401, - ""message"" : ""Authentication credentials were not provided."" -}","def test_ideploy_snapshot_without_authorization(anonymous_exec_api): - """""" - taking snapshot of island without_authorization - """""" - uid = ""invalid"" - r, r_details = anonymous_exec_api.ideploy_snapshot(uuid=uid, wait=False) - test_assert.status(r, 401) - assert r.json()[""detail""] == ""Authentication credentials were not provided."" " -/ideploy/rest/snapshot/{UUID}/,taking snapshot of an island whenit is in running state,,"{ - ""status"" : 400, - ""message"" : ""Island snapshot is only allowed when all machines are in stopped state"" -}","def test_ideploy_snapshot_when_island_in_running_state(ideploy_start, run_api): - """""" - taking snapshot when island is in running state - """""" - x, r = ideploy_start - isl_id = 
x['deploy_uuid'] - r, rtask_details = run_api.ideploy_snapshot(uuid=isl_id) - test_assert.status(r, 400) - rjson = r.json() - assert rjson['error'] == ""Island snapshot is only allowed when all machines are in stopped state"", ""|> Json %s"" % rjson -" -/ideploy/rest/start/{UUID}/,Check for the transition of state from Stopped to Running,,Working as intended,"def test_ideploy_check_from_stopped_to_running(run_api, ideploy_start): +/ilibrary/rest/edit/{UUID}/,updating NIC which is a part of some other machine,"params3 = { + ""name"": ""test_ilibrary"", + ""machines"": { + ""add"": [ + { + ""uuid"": r1.json()[""uuid""] + } + ] + }, + ""is_public"": False + } + +params4 = { + ""machines"": { + ""update"": [ + { + ""uuid"": r1.json()[""uuid""], + ""nics"": { + ""update"": [ + { + 'id': second_nic, + ""model"": ""virtio"" + } + ] + } + } + ] + }, + ""is_public"": False + }","{ + ""status"": 400, + ""message"": ""The provided nic with id isn't part of this machine"" +}","def test_ilibrary_edit_update_nics_of_other_island(run_api): """""" - state transition from stopped to running + Editing an Island Library by updating nic which is part of another island library """""" - x, r = ideploy_start - deploy_id = x[""deploy_uuid""] - result = run_api.ideploy_details(uuid=deploy_id).json() - final_state = result[""state""] - assert final_state == ""running"", 'The error is %s' % result + networks = template_networks() + if run_api.arch_type == ""aarch64"": + params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + params2, r2 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + else: + params1, r1 = run_api.library_add_new_vm(networks=networks) + params2, r2 = run_api.library_add_new_vm(networks=networks) + + # first_nic = r1.json()['hw']['networks'][0]['id'] + second_nic = r2.json()['hw']['networks'][0]['id'] + + params3 = { + ""name"": ""test_ilibrary"", + ""machines"": { + 
""add"": [ + { + ""uuid"": r1.json()[""uuid""] + } + ] + }, + ""is_public"": False + } + params3, r3 = run_api.ilibrary_add_new_island(params=params3) + rjson3 = r3.json() + uuid = rjson3['uuid'] + + params3 = { + ""machines"": { + ""update"": [ + { + ""uuid"": r1.json()[""uuid""], + ""nics"": { + ""update"": [ + { + 'id': second_nic, + ""model"": ""virtio"" + } + ] + } + } + ] + }, + ""is_public"": False + } + params, r = run_api.ilibrary_edit_island(uuid, params=params4) + test_assert.status(r, 400) + res = r.json() + assert res['error'] == ""The provided nic with id ["" + str(second_nic) + ""] isn't part of this machine"" + if 'error' not in rjson3.keys(): + run_api.ilibrary_delete(uuid, params) + run_api.library_delete(r1.json()[""uuid""]) + run_api.library_delete(r2.json()[""uuid""]) " -/ideploy/rest/start/{UUID}/,Start a Deployment which you are not an owner of and without Admin rights,,401: UnAuthorized,"@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) -def test_ideploy_start_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): +/ilibrary/rest/edit/{UUID}/,updating machine with no segment name,"{ + ""name"": ""test_ilibrary"", + ""is_public"": True, + 'network_segments': { + 'update': [ + { + 'uuid': network_segment['uuid'], + 'name': '' + } + ] + + }, + ""is_public"": True + }","{ +""status"" : 400, +""message"" :""This field cannot be blank"" +}","def test_ilibrary_edit_segments_with_no_name(run_api): """""" - Start the Island by non-admin + Editing an Island Library segment with no name """""" - # Non-admin check of Starting a deployment created by different user - deploy_id = custom_ilib_admin_operations - r = run_api.ideploy_start(deploy_id) - test_assert.status(r, 403) - run_api.ideploy_stop(deploy_id) + params = { + ""name"": ""test_ilibrary"", + ""is_public"": True + } + params, r = run_api.ilibrary_add_new_island(params=params) + rjson = r.json() + uuid = rjson[""uuid""] + network_segment = 
rjson['network_segments'][0] + params = { + 'network_segments': { + 'update': [ + { + 'uuid': network_segment['uuid'], + 'name': '' + } + ] + + }, + ""is_public"": True + } + params, r = run_api.ilibrary_edit_island(uuid, params=params) + test_assert.status(r, 400) + res = r.json() + assert res['network_segments']['update'][0]['name'] == ['This field may not be blank.'] + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) " -/ideploy/rest/start/{UUID}/,Start a Deployment which you are not an owner of but with Admin rights,,200 : job created,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) -def test_ideploy_start_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): +/ilibrary/rest/edit/{UUID}/,updating machine with no machine name,"{ + ""name"": ""test_ilibrary"", + ""machines"": { + ""add"": [ + { + ""uuid"": rjson[""uuid""] + } + ] + }, + ""is_public"": False, + ""machines"": { + ""update"": [ + { + 'name': """", + ""uuid"": machine_uuid + } + ] + }, + ""is_public"": False + }","{ +""status"" : 201, +""response"" : success +}","def test_ilibrary_edit_with_no_machine_name(run_api, library_add_new_vm): """""" - Start the Island by Admin + Editing an Island Library with no machine name """""" - # Admin check of Starting a deployment created by different user - deploy_id = custom_ilib_non_admin_operations - r = run_api.ideploy_start(deploy_id) + params, rjson = library_add_new_vm + params = { + ""name"": ""test_ilibrary"", + ""machines"": { + ""add"": [ + { + ""uuid"": rjson[""uuid""] + } + ] + }, + ""is_public"": False + } + params1, r1 = run_api.ilibrary_add_new_island(params=params) + rjson1 = r1.json() + uuid = rjson1['uuid'] + machine_uuid = rjson1['machines'][0]['uuid'] + params = { + ""machines"": { + ""update"": [ + { + 'name': """", + ""uuid"": machine_uuid + } + ] + }, + ""is_public"": False + } + params, r = run_api.ilibrary_edit_island(uuid, 
params=params) test_assert.status(r, 201) - run_api.ideploy_stop(deploy_id) + if 'error' not in rjson1.keys(): + run_api.ilibrary_delete(uuid, params1) " -/ideploy/rest/start/{UUID}/,Starting a deployment starts all Segments inside of it,,Working as intended,"def test_ideploy_deployment_starts_all_segment(run_api): +/ilibrary/rest/edit/{UUID}/,updating and deleting same machine,"params3 = { + ""name"": ""test_ilibrary"", + ""machines"": { + ""add"": [ + { + ""uuid"": r1.json()[""uuid""] + } + ] + }, + ""is_public"": False + } +params2 = { + ""machines"": { + ""update"": [ + { + ""uuid"": machine_uuid, + 'description': 'description' + } + ], + 'delete': [ + { + ""uuid"": machine_uuid, + } + ], + }, + ""is_public"": False + }","{ + ""status"": 400, + ""message"": ""A machine cannot have both Deletion and Updation in same API call"" +}","def test_ilibrary_edit_update_and_delete_same_machine(run_api): """""" - test_ideploy_deployment_starts_all_segment + Editing an Island Library by updating and deleting same machine """""" networks = template_networks() - params1, r1 = run_api.library_add_new_vm(networks=networks) - machine1 = { - ""uuid"": r1.json()[""uuid""], - ""nics"": { + if run_api.arch_type == ""aarch64"": + params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + else: + params1, r1 = run_api.library_add_new_vm(networks=networks) + + params3 = { + ""name"": ""test_ilibrary"", + ""machines"": { ""add"": [ { - ""mac"": ""auto"", - ""type"": ""bridge"", - ""model"": networks[0].get(""model"", ""virtio""), - ""segment"": ""Default Public Segment"" - }, + ""uuid"": r1.json()[""uuid""] + } + ] + }, + ""is_public"": False + } + params3, r3 = run_api.ilibrary_add_new_island(params=params3) + rjson3 = r3.json() + uuid = rjson3['uuid'] + machine_uuid = rjson3['machines'][0]['uuid'] + + params2 = { + ""machines"": { + ""update"": [ + { + ""uuid"": machine_uuid, + 'description': 'description' + } + ], + 'delete': [ + { 
+ ""uuid"": machine_uuid, + } + ], + }, + ""is_public"": False + } + params, r = run_api.ilibrary_edit_island(uuid, params=params2) + test_assert.status(r, 400) + res = r.json() + assert res['error'] == ""A machine cannot have both Deletion and Updation in same API call"" + if 'error' not in rjson3.keys(): + run_api.ilibrary_delete(uuid, params3) + run_api.library_delete(r1.json()[""uuid""]) + + +" +/ilibrary/rest/edit/{UUID}/,updating and deleting a segment name which already exists ,"{ + ""name"": ""test_ilibrary"", + ""is_public"": True, + 'network_segments': { + 'add': [ + { + 'name': 'network1' + } + ] + }, + 'network_segments': { + 'update': [ { - ""mac"": ""auto"", - ""type"": ""bridge"", - ""model"": networks[0].get(""model"", ""virtio""), - ""segment"": ""Seg1"" + 'uuid': seg_id, + 'name': 'network2' } ], - } - - } + 'delete': [ + { + 'uuid': seg_id, + } + ] + }, + ""is_public"": True + }","{ +""status"" : 400, +""message"" : ""The Segment shouldn't have both Updation and Deletion in same API call"" +}","def test_ilibrary_edit_update_and_delete_segment_same_name(run_api): + """""" + Editing an Island Library update a Segment name which already exists but is being deleted + """""" params = { - ""name"": ""Machine1"", + ""name"": ""test_ilibrary"", ""is_public"": True, - ""machines"": { - ""add"": [machine1], - }, - ""network_segments"": { - ""add"": [ - { - ""name"": ""Seg1"", - ""description"": ""string"", - ""enable_ipv4"": False - }, + 'network_segments': { + 'add': [ { - ""name"": ""Seg2"", - ""description"": ""string"", - ""enable_ipv4"": False - }, + 'name': 'network1' + } ] } + } params, r = run_api.ilibrary_add_new_island(params=params) - island_uuid = r.json()[""uuid""] - res = run_api.ideploy_deploy(uuid=island_uuid) - deploy_uuid = res.json()[""deploy_uuid""] - run_api.ideploy_start(deploy_uuid) - r_details = run_api.ideploy_details(deploy_uuid) - result = r_details.json() - segment_list = result[""island""][""network_segments""] - for segment in 
segment_list: - if segment[""status""] != ""active"": - assert False, ""The error is %s"" % result - machine_uuids = [mc[""uuid""] for mc in r_details.json()[""machines""]] - deploy_bulkops_params = { - ""machine_list"": machine_uuids, - ""op"": ""poweroff"" - } - run_api.deploy_bulkops(deploy_bulkops_params) - run_api.ideploy_shutdown(deploy_uuid) - run_api.ideploy_delete(uuid=deploy_uuid) - run_api.ilibrary_delete(uuid=island_uuid) - run_api.library_delete(r1.json()[""uuid""]) - -" -/ideploy/rest/start/{UUID}/,starting the deployment of island machine when requested with invalid token,"{ -uid =""invalid"" -} - -","{ - ""status"" : 404, - ""message"" : ""Deployed island doesnt exist."" -}","def test_ideploy_with_invalid_uuid(run_api): - """""" - starting the deployment of island machine when requested with invalid token - """""" - uid = ""invalid"" - r = run_api.ideploy_start(uuid=uid) - test_assert.status(r, 404) - assert r.json()[""error""] == ""Deployed Island Doesn't Exist"" -" -/ideploy/rest/start/{UUID}/,starting the deployment of island machine,,"{ -""status"":201 -}","def test_ideploy_start_self(ideploy_start): - """""" - Start the Island - """""" - x, r = ideploy_start - test_assert.status(r, 201) -" -/ideploy/rest/start/{UUID}/,starting a deployed island machine without authorization,"{ -uid =""invalid"" -}","{ - ""status"" : 401, - ""message"" : ""Authentication credentials were not provided."" -}","def test_ideploy_start_without_authorization(anonymous_exec_api): - """""" - test_ideploy_without_authorization - """""" - uid = ""invalid"" - r = anonymous_exec_api.ideploy_start(uuid=uid, wait=False) - test_assert.status(r, 401) - assert r.json()[""detail""] == ""Authentication credentials were not provided."" -" -/ideploy/rest/start/{UUID}/,"starting island machine by a manager , when the manager do not right over the servers",,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) 
-@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) -def test_ideploy_start_manager_no_server_right(skip_if_not_manager, - custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): - """""" - Start the Island by manager when have no right on server - """""" - # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server - deploy_id = custom_ilib_admin_operations - r = run_api.ideploy_start(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) - run_api.ideploy_stop(deploy_id) + rjson = r.json() + uuid = rjson[""uuid""] + segment_list = rjson['network_segments'] - # When the user is part of the group that the manager manages but the deployment is not on manager rightful server - deploy_id = custom_ilib_non_admin_operations - r = run_api.ideploy_start(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) - run_api.ideploy_stop(deploy_id) -" -/ideploy/rest/start/{UUID}/,"starting island machine by a manager , when the manager have right over the servers",,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -def test_ideploy_start_manager_server_right(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): - """""" - Start the Island by manager when have right on server - """""" - # When the user is not part of the group that the manager manages - deploy_id = custom_ilib_admin_operations - r = run_api.ideploy_start(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) - run_api.ideploy_stop(deploy_id) + for segment in segment_list: + if segment['name'] == 'network1': + seg_id = segment['uuid'] - # 
When the user is part of the group that the manager manages and deployment is on manager rights to server - deploy_id = custom_ilib_non_admin_operations - r = run_api.ideploy_start(deploy_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) - run_api.ideploy_stop(deploy_id) -" -/ideploy/rest/stop/{UUID}/,Check for the transition of state from Running to Stopped,,Working as intended,"def test_ideploy_stop_checking_state_running_to_stop(ideploy_start, run_api): - """""" - Check for the transition of state from Running to Stopped - """""" - res, r = ideploy_start - deploy_id = res[""deploy_uuid""] - running_r = run_api.ideploy_details(deploy_id) - running_rjson = running_r.json() - assert running_rjson['state'] == 'running', ""json |> %s"" % running_rjson - run_api.ideploy_stop(deploy_id) - run_api.ideploy_shutdown(deploy_id) - stop_r = run_api.ideploy_details(deploy_id) - stop_rjson = stop_r.json() - assert stop_rjson['state'] == 'stopped', ""json |> %s"" % stop_rjson -" -/ideploy/rest/stop/{UUID}/,Stop a Deployment which you are not an owner of and without Admin rights,,401: UnAuthorized,"@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) -def test_ideploy_stop_without_owner_and_admin_rights(skip_if_admin, custom_ilib_admin_operations, run_api): - """""" - Stop a Deployment which you are not an owner of and without Admin rights - """""" - deploy_id = custom_ilib_admin_operations - r = run_api.ideploy_stop(deploy_id, error=True) - test_assert.status(r, 403) - rjson = r.json() - assert rjson['error'] == 'You do not have permission to perform this action.', ""The error message is %s"" % rjson + params = { + 'network_segments': { + 'update': [ + { + 'uuid': seg_id, + 'name': 'network2' + } + ], + 'delete': [ + { + 'uuid': seg_id, + } + ] + }, + ""is_public"": True + } + params, r = run_api.ilibrary_edit_island(uuid, params=params) + test_assert.status(r, 400) + res = r.json() + assert 
res['error'] == ""The Segment shouldn't have both Updation and Deletion in same API call"" + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) " -/ideploy/rest/stop/{UUID}/,Stop a Deployment which you are not an owner of but with Admin rights,,200 : job created,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) -def test_ideploy_stop_with_admin_rights(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): +/ilibrary/rest/edit/{UUID}/,editing Island which you are not an owner of but with Admin rights,"{ + ""name"": ""test_ilibrary"", + ""is_public"": True + ""name"": ""test_ilibrary_edit"", + ""is_public"": True + }","{ +""status"" : 201, +""response"" : success +}","def test_ilibrary_edit_admin(skip_if_not_admin, run_api, non_admin_exec_api): """""" - stop a Deployment which you are not an owner of and with Admin rights + Editing an priate Island Library by admin whose owner is not admin """""" - deploy_id = custom_ilib_non_admin_operations - r = run_api.ideploy_stop(deploy_id) + params = { + ""name"": ""test_ilibrary"", + ""is_public"": True + } + params, r_isl = non_admin_exec_api.ilibrary_add_new_island(params=params) + rjson_isl = r_isl.json() + ilib_id = rjson_isl[""uuid""] + params = { + ""name"": ""test_ilibrary_edit"", + ""is_public"": True + } + params, r = run_api.ilibrary_edit_island(ilib_id, params=params) test_assert.status(r, 201) + if 'error' not in rjson_isl.keys(): + r = run_api.ilibrary_delete(ilib_id, {}) " -/ideploy/rest/stop/{UUID}/,Stopping a deployment Stops all Segments inside of it,,Working as intended,"def test_ideploy_stop_checking_state_of_segments(ideploy_start, run_api): - """""" - Stopping a deployment Stops all Segments inside of it +/ilibrary/rest/edit/{UUID}/,editing an island-library with no description,"{ + ""name"": ""test_ilibrary"", + ""is_public"": True + ""name"": ""test_ilibrary_edit"", + ""is_public"": True + }","{ +""status"" : 
201, +""response"" : success +}","def test_ilibrary_edit_with_no_description(run_api, library_add_new_vm): """""" - res, r = ideploy_start - deploy_id = res[""deploy_uuid""] - run_api.ideploy_stop(deploy_id) - run_api.ideploy_shutdown(deploy_id) - stop_r = run_api.ideploy_details(deploy_id) - stop_rjson = stop_r.json() - stop_network_segments = stop_rjson['island']['network_segments'] - for stop in stop_network_segments: - if stop['name'] not in (""Default Public Segment"", ""HostOnly Segment""): - assert stop['status'] == ""inactive"", ""json |> %s"" % stop - + Editing an Island Library with no description + """""" + params = { + ""name"": ""test_ilibrary"", + ""is_public"": True + } + params, r = run_api.ilibrary_add_new_island(params=params) + rjson = r.json() + uuid = rjson[""uuid""] + params = { + ""name"": ""test_ilibrary_edit"", + ""is_public"": True + } + params, r = run_api.ilibrary_edit_island(uuid, params=params) + test_assert.status(r, 201) + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) " -/ideploy/rest/stop/{UUID}/,stopping the deployment of island machine using invalid uuid,"{ -deploy_id =""invalid"" -} - -","{ - ""status"" : 404, - ""message"" : ""Deployed island doesnt exist."" -}","def test_ideploy_stop_invalid_uuid(run_api): +/ilibrary/rest/edit/{UUID}/,editing an Island with no Segments,"{ + ""name"": ""test_ilibrary"", + ""is_public"": True + 'network_segments': {}, + ""is_public"": True + }","{ +""status"" : 201, +""response"" : success +}","def test_ilibrary_edit_with_no_segments(run_api): """""" - When Island Deployment uuid doesnot exist + Editing an Island Library with no segments """""" - deploy_id = ""invalid"" - r = run_api.ideploy_stop(deploy_id, error=True) - test_assert.status(r, 404) + params = { + ""name"": ""test_ilibrary"", + ""is_public"": True + } + params, r = run_api.ilibrary_add_new_island(params=params) rjson = r.json() - assert rjson['error'] == ""Deployed Island Doesn't 
Exist"", ""The error meassage is %s"" % rjson + uuid = rjson[""uuid""] + params = { + 'network_segments': {}, + ""is_public"": True + } + params, r = run_api.ilibrary_edit_island(uuid, params=params) + test_assert.status(r, 201) + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) " -/ideploy/rest/stop/{UUID}/,stopping the deployment of island machine using valid existing uuid,,"{ -""status"":201 -}","def test_ideploy_stop(ideploy_start, run_api): +/ilibrary/rest/edit/{UUID}/,editing an island library which has next revision,"{ + 'name': 'test', + ""is_public"": False + }","{ +""status"" : 403, +""response"" : forbidden +}","def test_ilibrary_edit_has_next_revision(run_api, ilibrary_add_new_island): """""" - When provided with valid uuid + Editing an Island Library which has next revision """""" - params, r = ideploy_start - deploy_id = params['deploy_uuid'] - res = run_api.ideploy_stop(deploy_id) - test_assert.status(res, 201) + template, r = ilibrary_add_new_island + isl_uuid = r.json()[""uuid""] + r = run_api.ideploy_deploy(isl_uuid) + deploy_id = r.json()[""deploy_uuid""] + r, rtask_details = run_api.ideploy_snapshot(deploy_id) + run_api.ideploy_delete(deploy_id) + params = { + 'name': 'test', + ""is_public"": False + } + params, r = run_api.ilibrary_edit_island(isl_uuid, params=params) + test_assert.status(r, 403) + run_api.ilibrary_delete(rtask_details['result']['snapshotted_island_uuid'], {}) + r = run_api.ilibrary_delete(isl_uuid, {}) " -/ideploy/rest/stop/{UUID}/,stopping the deployment of deployed island machine when requested with invalid token,"{ -deploy_id =""invalid"" +/ilibrary/rest/edit/{UUID}/,editing an Island library when requested with invalid token,"{ +uuid = 'invalid-uuid' +params = { + ""name"": ""test_ilibrary"", + ""is_public"": True +} }","{ ""status"" : 401, ""message"" : ""Invalid token"" -}","def test_ideploy_stop_with_invalid_token(invalid_exec_api): - - deploy_id = ""invalid"" - r = 
invalid_exec_api.ideploy_stop(deploy_id, error=True) - test_assert.status(r, 401) - rjson = r.json() - assert rjson['detail'] == ""Invalid token."", ""The error is message %s"" % rjson - -" -/ideploy/rest/stop/{UUID}/,stopping a deployed island machine without authorization,"{ -deploy_id =""invalid"" -}","{ - ""status"" : 401, - ""message"" : ""Authentication credentials were not provided."" -}","def test_ideploy_stop_without_authorizaton(anonymous_exec_api): +}","def test_ilibrary_edit_with_invalid_token(invalid_exec_api): """""" - without authorization + Editing an Island Library with invalid token """""" - deploy_id = ""invalid"" - r = anonymous_exec_api.ideploy_stop(deploy_id, error=True) + uuid = 'invalid-uuid' + params = { + ""name"": ""test_ilibrary"", + ""is_public"": True + } + params, r = invalid_exec_api.ilibrary_edit_island(uuid, params=params) test_assert.status(r, 401) - rjson = r.json() - assert rjson['detail'] == ""Authentication credentials were not provided."", ""The error is message %s"" % rjson - - + res = r.json() + assert res['detail'] == ""Invalid token."" " -/ideploy/rest/stop/{UUID}/,"stopping island machine by a manager , when the manager do not have right over the servers",,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) -@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) -def test_ideploy_stop_by_manager_without_server_right(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): +/ilibrary/rest/edit/{UUID}/,editing an Island library when invalid UUID is provided,"{ +uuid = 'invalid-uuid' +params = { + ""name"": ""test_ilibrary"", + ""is_public"": True +} +}","{ + ""status"": 404, +}","def test_ilibrary_edit_invalid_uuid(run_api): """""" - Ideploy stop by manager without server right + Editing an Island Library with invalid uuid """""" - # When Manager manages the user but not the server - 
deploy_id = custom_ilib_non_admin_operations - r = run_api.ideploy_stop(deploy_id, error=True) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) - - # when manager does not manage the user nor the server - deploy_id = custom_ilib_admin_operations - r = run_api.ideploy_stop(deploy_id, error=True) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + uuid = 'invalid-uuid' + params = { + ""name"": ""test_ilibrary"", + ""is_public"": True + } + params, r = run_api.ilibrary_edit_island(uuid, params=params) + test_assert.status(r, 404) " -/ideploy/rest/stop/{UUID}/,"stopping island machine by a manager , when the manager have right over the servers",,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -def test_ideploy_stop_by_manager_with_server_right(skip_if_not_manager, custom_ilib_non_admin_operations, custom_ilib_admin_operations, run_api): +/ilibrary/rest/edit/{UUID}/,"editing an Island library successfully. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{""name"": ""add"", ""is_public"": False}","{ + ""status"": 403 +}","endpoint = ""ilibrary_edit"" +PARAMETERS = [{""dest_obj"": OBJ_ISL}] + +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +def test_ilibrary_edit(run_api, ilibrary_edit_island, custom_ilib_admin_operations, custom_ilib_non_admin_operations): """""" - Ideploy stop by manager with server right + Editing an Island Library """""" - # when the manager manages the user and server - deploy_id = custom_ilib_non_admin_operations - r = run_api.ideploy_stop(deploy_id, error=True) - print(r.status_code) - test_assert.status(r, manager_rights_response(endpoint, manages_server=True, manages_user=True)) + params, r = ilibrary_edit_island + test_assert.status(r, 201) - # when the manager manages the server but does not manages user - deploy_id = custom_ilib_admin_operations - r = run_api.ideploy_stop(deploy_id, error=True) - test_assert.status(r, manager_rights_response(endpoint, manages_server=True, manages_user=False)) + # Adding non_admin check of Editing an Island Library created by different user + if run_api.user_type == USER_TYPE[""non_admin""]: + lib_id = custom_ilib_admin_operations + params = {""name"": ""add"", ""is_public"": False} + params, r = run_api.ilibrary_edit_island(lib_id, params=params) + test_assert.status(r, 403) + + # Adding a Manager check of Editing an Island Library created by a user of his/her group + # and also when it's not the case + if run_api.user_type == USER_TYPE[""manager""]: + # When the user is not part of the group that the manager manages + lib_id = custom_ilib_admin_operations + params = {""name"": ""add"", ""is_public"": False} + params, r = run_api.ilibrary_edit_island(lib_id, params=params) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False)) + + # When the user is part of the group that the manager manages + 
lib_id = custom_ilib_non_admin_operations + params = {""name"": ""add"", ""is_public"": False} + params, r = run_api.ilibrary_edit_island(lib_id, params=params) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True)) " -/ilibrary/rest/add/,creating an island library and adding it,,"{ +/ilibrary/rest/edit/{UUID}/,editing an island library segment,"{ + ""name"": ""test_ilibrary"", + ""is_public"": True, + 'network_segments': { + 'add': [ + { + 'name': 'seg1' + }, + { + 'name': 'seg2' + }, + { + 'name': 'seg3' + }, + ] + }, + 'network_segments': { + 'update': [ + { + 'uuid': seg1_id, + 'name': 'seg3' + }, + { + 'uuid': seg2_id, + 'name': 'seg1' + }, + { + 'uuid': seg3_id, + 'name': 'seg2' + } + ] + }, + ""is_public"": True + }","{ ""status"" : 201, ""response"" : success -}","def test_ilibrary_add(run_api, ilibrary_add_new_island): +}","def test_ilibrary_edit_segments(run_api): """""" - Creating an Island Library + Editing an Island Library segments """""" - params, r = ilibrary_add_new_island - test_assert.status(r, 201) -" -/ilibrary/rest/add/,creating an island library and adding it when invalid UUID of machine is provided,"{ - ""name"": ""test_ilibrary_add_invalid_uuid_machine"", - ""machines"": { - ""add"": [ + params = { + ""name"": ""test_ilibrary"", + ""is_public"": True, + 'network_segments': { + 'add': [ + { + 'name': 'seg1' + }, + { + 'name': 'seg2' + }, { - ""uuid"": ""invalid-uuid"" - } + 'name': 'seg3' + }, ] }, - ""is_public"": True - }","{ -""status"" : 400, -""message"" : ""Valid UUID must be provided"" -}","def test_ilibrary_add_invalid_uuid_machine(run_api): - """""" - Creating an Island Library with invalid uuid - """""" + } + params, r = run_api.ilibrary_add_new_island(params=params) + rjson = r.json() + uuid = rjson[""uuid""] + segment_list = rjson['network_segments'] + + for segment in segment_list: + if segment['name'] == 'seg1': + seg1_id = segment['uuid'] + elif segment['name'] == 'seg2': + seg2_id = segment['uuid'] + 
elif segment['name'] == 'seg3': + seg3_id = segment['uuid'] + params = { - ""name"": ""test_ilibrary_add_invalid_uuid_machine"", - ""machines"": { - ""add"": [ + 'network_segments': { + 'update': [ { - ""uuid"": ""invalid-uuid"" + 'uuid': seg1_id, + 'name': 'seg3' + }, + { + 'uuid': seg2_id, + 'name': 'seg1' + }, + { + 'uuid': seg3_id, + 'name': 'seg2' } ] }, ""is_public"": True } - params, r = run_api.ilibrary_add_new_island(params=params) - test_assert.status(r, 400) - rjson = r.json() - assert rjson['machines']['add'][0]['uuid'] == ['Must be a valid UUID.'] + params, r = run_api.ilibrary_edit_island(uuid, params=params) + test_assert.status(r, 201) + res = r.json() + segment_list = res['network_segments'] + + for segment in segment_list: + if segment['uuid'] == seg1_id: + assert segment['name'] == 'seg3' + elif segment['uuid'] == seg2_id: + assert segment['name'] == 'seg1' + elif segment['uuid'] == seg3_id: + assert segment['name'] == 'seg2' + + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) + " -/ilibrary/rest/add/,creating an island library and adding it when machine which is a part of another Island is provided,"{ - ""name"": ""test_ilibrary_add_machine_from_other_island"", - ""machines"": { - ""add"": [ +/ilibrary/rest/edit/{UUID}/,editing an Island Library by deleting segment which is part of another island library,"params1 = { + ""name"": ""test_ilibrary"", + ""is_public"": True + } +params2 = { + ""name"": ""test_ilibrary"", + ""is_public"": True + } +params3 = { + 'network_segments': { + 'delete': [ { - ""uuid"": uuid + 'uuid': network_segment2['uuid'] } ] + }, ""is_public"": True }","{ -""status"" : 400, -""message"" : ""Adding machine which already is a part of an Island isn't supported..."" -}","def test_ilibrary_add_machine_from_other_island(run_api, ilibrary_add_new_island): + ""status"": 400, + ""message"": ""The Segment uuid doesn't exist in the island"" +}","def 
test_ilibrary_edit_delete_segments_of_other_island(run_api): """""" - Creating an Island Library by adding machine from another island + Editing an Island Library by deleting segment which is part of another island library """""" - params, r = ilibrary_add_new_island - rjson = r.json() - machines = rjson['machines'] - uuid = machines[0]['uuid'] - params = { - ""name"": ""test_ilibrary_add_machine_from_other_island"", - ""machines"": { - ""add"": [ + params1 = { + ""name"": ""test_ilibrary"", + ""is_public"": True + } + params1, r1 = run_api.ilibrary_add_new_island(params=params1) + rjson1 = r1.json() + uuid1 = rjson1[""uuid""] + + params2 = { + ""name"": ""test_ilibrary"", + ""is_public"": True + } + params2, r2 = run_api.ilibrary_add_new_island(params=params2) + rjson2 = r2.json() + uuid2 = rjson2[""uuid""] + + network_segment2 = rjson2['network_segments'][0] + params3 = { + 'network_segments': { + 'delete': [ { - ""uuid"": uuid + 'uuid': network_segment2['uuid'] } ] + }, ""is_public"": True } - params, r = run_api.ilibrary_add_new_island(params=params) - test_assert.status(r, 400) - rjson = r.json() - assert rjson['error'] == ""Adding machine which already is a part of an Island isn't supported..."" + params3, r3 = run_api.ilibrary_edit_island(uuid1, params=params3) + test_assert.status(r3, 400) + res = r3.json() + assert res['error'] == ""The Segment uuid ["" + network_segment2['uuid'] + ""] doesn't exist in the island"" + if 'error' not in rjson1.keys(): + run_api.ilibrary_delete(uuid1, params1) + if 'error' not in rjson2.keys(): + run_api.ilibrary_delete(uuid2, params2) " -/ilibrary/rest/add/,creating an island library and adding it when only the required params,"{ - ""name"": ""test_ilibrary_add_required_params"", +/ilibrary/rest/edit/{UUID}/,editing an ilibrary without token,"{ +uuid = 'valid-uuid' +params = { + ""name"": ""test_ilibrary"", ""is_public"": True +} }","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided."" 
+}","def test_ilibrary_edit_without_authorization(anonymous_exec_api): + """""" + Editing an Island Library without authorization + """""" + uuid = 'valid-uuid' + params = { + ""name"": ""test_ilibrary"", + ""is_public"": True + } + params, r = anonymous_exec_api.ilibrary_edit_island(uuid, params=params) + test_assert.status(r, 401) + res = r.json() + assert res['detail'] == ""Authentication credentials were not provided."" +" +/ilibrary/rest/edit/{UUID}/,editing an ilibrary with no name,"{ + ""name"": ""test_ilibrary"", + ""is_public"": True, + ""description"": ""testing"", + ""is_public"": True + }","{ ""status"" : 201, -""response"" : success , island library created -}","def test_ilibrary_add_required_params(run_api): +""response"" : success +}","def test_ilibrary_edit_with_no_name(run_api): """""" - Creating an Island Library with required params + Editing an Island Library with no name """""" params = { - ""name"": ""test_ilibrary_add_required_params"", + ""name"": ""test_ilibrary"", ""is_public"": True } params, r = run_api.ilibrary_add_new_island(params=params) - test_assert.status(r, 201) rjson = r.json() + uuid = rjson[""uuid""] + params = { + ""description"": ""testing"", + ""is_public"": True + } + params, r = run_api.ilibrary_edit_island(uuid, params=params) + test_assert.status(r, 201) if 'error' not in rjson.keys(): uuid = rjson[""uuid""] run_api.ilibrary_delete(uuid, params) " -/ilibrary/rest/add/,creating an island library and adding it when provided Island with no name,"{ - ""name"": """", +/ilibrary/rest/edit/{UUID}/,editing an ilibrary with no machines operations,"{ +uuid = 'valid-uuid' +params = { + ""name"": ""test_ilibrary"", + ""is_public"": True, + ""machines"": {}, ""is_public"": True + +} }","{ -""status"" : 400, -""message"" : ""This field must not be blank"" -}","def test_ilibrary_add_empty_island_name(run_api): +""status"" : 201, +""response"" : success +}","def test_ilibrary_edit_with_no_machine_operation(run_api): """""" - Creating 
an Island Library with empty island name + Editing an Island Library with no machine operation """""" params = { - ""name"": """", + ""name"": ""test_ilibrary"", ""is_public"": True } params, r = run_api.ilibrary_add_new_island(params=params) - test_assert.status(r, 400) rjson = r.json() - assert rjson['name'] == ['This field may not be blank.'] + uuid = rjson[""uuid""] + params = { + 'machines': {}, + ""is_public"": True + } + params, r = run_api.ilibrary_edit_island(uuid, params=params) + test_assert.status(r, 201) + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) " -/ilibrary/rest/add/,creating an island library and adding it when provided machine with no name,"{ - ""name"": ""test_ilibrary_add_machine_from_other_island"", +/ilibrary/rest/edit/{UUID}/,deleting NIC which is a part of some other machine,"params3 = { + ""name"": ""test_ilibrary"", + ""machines"": { + ""add"": [ + { + ""uuid"": r1.json()[""uuid""] + } + ] + }, + ""is_public"": False + } +params4 = { + ""machines"": { + ""update"": [ + { + ""uuid"": machine_uuid, + ""nics"": { + ""delete"": [ + { + 'id': second_nic + } + ] + } + } + ] + }, + ""is_public"": False + }","{ + ""status"": 400, + ""message"": ""The NIC with the given id isn't part of the provided machine"" +}","def test_ilibrary_edit_delete_nic_of_other_machine(run_api): + """""" + Editing an Island Library by Deleting NIC which is not part of this machine but some other machine + """""" + networks = template_networks() + if run_api.arch_type == ""aarch64"": + params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + params2, r2 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + else: + params1, r1 = run_api.library_add_new_vm(networks=networks) + params2, r2 = run_api.library_add_new_vm(networks=networks) + + # first_nic = r1.json()['hw']['networks'][0]['id'] + second_nic = 
r2.json()['hw']['networks'][0]['id'] + + params3 = { + ""name"": ""test_ilibrary"", ""machines"": { ""add"": [ { - 'name': """", - ""uuid"": rjson[""uuid""] + ""uuid"": r1.json()[""uuid""] } ] }, - ""is_public"": True - }","{ -""status"" : 201, -""response"" : success , island library created -}"," -def test_ilibrary_add_machine_with_no_name(run_api, library_add_new_vm): - """""" - Creating an Island Library of machine with no name - """""" - params, rjson = library_add_new_vm - params = { - ""name"": ""test_ilibrary_add_machine_from_other_island"", + ""is_public"": False + } + params3, r3 = run_api.ilibrary_add_new_island(params=params3) + rjson3 = r3.json() + uuid = rjson3['uuid'] + machine_uuid = rjson3['machines'][0]['uuid'] + params3 = { ""machines"": { - ""add"": [ + ""update"": [ { - 'name': """", - ""uuid"": rjson[""uuid""] + ""uuid"": machine_uuid, + ""nics"": { + ""delete"": [ + { + 'id': second_nic + } + ] + } } ] }, - ""is_public"": True + ""is_public"": False } - params1, r1 = run_api.ilibrary_add_new_island(params=params) - test_assert.status(r1, 201) - rjson1 = r1.json() - run_api.library_delete(rjson[""uuid""], params) - if 'error' not in rjson.keys(): - uuid = rjson1[""uuid""] - run_api.ilibrary_delete(uuid, params1) + params, r = run_api.ilibrary_edit_island(uuid, params=params4) + test_assert.status(r, 400) + res = r.json() + assert res['error'] == ""The NIC with the given id '"" + str(second_nic) + ""' isn't part of the provided machine"" + if 'error' not in rjson3.keys(): + run_api.ilibrary_delete(uuid, params) + run_api.library_delete(r1.json()[""uuid""]) + run_api.library_delete(r2.json()[""uuid""]) " -/ilibrary/rest/add/,creating an island library and adding it when provided segment without name,"{ - ""name"": ""test_ilibrary_add_bigger_start_ip"", - ""is_public"": True, - ""network_segments"": { +/ilibrary/rest/edit/{UUID}/,deleting Island with UUID of machine which is part of other Island,"params2 = { + ""name"": ""test_ilibrary"", + 
""machines"": { ""add"": [ { - ""enable_ipv4"": True, - ""bridge_ip"": ""192.168.98.0"", - ""network_subnet"": ""255.255.255.0"", - ""enable_dhcp"": True, - ""start_ip"": ""192.168.98.1"", - ""end_ip"": ""192.168.98.150"" + ""uuid"": rjson1[""uuid""] } ] - } + }, + ""is_public"": False + } +params3 = { + ""name"": ""test_ilibrary"", + ""is_public"": True + } +params4 = { + ""machines"": { + ""delete"": [ + { + ""uuid"": machine_uuid + } + ] + }, + ""is_public"": False }","{ -""status"" : 400, -""message"" : ""This field must not be blank"" -}","def test_ilibrary_add_without_segmennt_name(run_api): + ""status"": 404, + ""response"": forbidden +}","def test_ilibrary_edit_delete_machine_of_another_island(run_api, library_add_new_vm): """""" - Creating an Island Library without segment name + Editing an Island Library by deleting uuid of machine which is part of another island """""" - params = { - ""name"": ""test_ilibrary_add_bigger_start_ip"", - ""is_public"": True, - ""network_segments"": { + + params1, rjson1 = library_add_new_vm + params2 = { + ""name"": ""test_ilibrary"", + ""machines"": { ""add"": [ { - ""enable_ipv4"": True, - ""bridge_ip"": ""192.168.98.0"", - ""network_subnet"": ""255.255.255.0"", - ""enable_dhcp"": True, - ""start_ip"": ""192.168.98.1"", - ""end_ip"": ""192.168.98.150"" + ""uuid"": rjson1[""uuid""] } ] - } + }, + ""is_public"": False } - params, r = run_api.ilibrary_add_new_island(params=params) - test_assert.status(r, 400) - rjson = r.json() - assert rjson['network_segments']['add'][0]['name'] == ['This field is required.'] -" -/ilibrary/rest/add/,creating an island library and adding it when provided start_ip and/or end_ip value is out of range as that of bridge_ip/Subnet range,"{ - ""name"": ""test_ilibrary_add_ips_out_of_range"", - ""is_public"": True, - ""network_segments"": { - ""add"": [ + params2, r2 = run_api.ilibrary_add_new_island(params=params2) + rjson2 = r2.json() + uuid2 = rjson2['uuid'] + machine_uuid = 
rjson2['machines'][0]['uuid'] + + params3 = { + ""name"": ""test_ilibrary"", + ""is_public"": True + } + params3, r3 = run_api.ilibrary_add_new_island(params=params3) + rjson3 = r3.json() + uuid3 = rjson3[""uuid""] + + params4 = { + ""machines"": { + ""delete"": [ { - ""name"": ""test_segment"", - ""enable_ipv4"": True, - ""bridge_ip"": ""192.168.98.0"", - ""network_subnet"": ""255.255.255.0"", - ""enable_dhcp"": True, - ""start_ip"": ""191.168.10.1"", - ""end_ip"": ""191.168.10.150"" + ""uuid"": machine_uuid } ] - } + }, + ""is_public"": False } -","{ -""status"" : 400, -""message"" : ""start_ip and/or end_ip should lie between inclusive range "" -}","def test_ilibrary_add_ips_out_of_range(run_api): - """""" - Creating an Island Library with out of range start ip, end ip - """""" - params = { - ""name"": ""test_ilibrary_add_ips_out_of_range"", - ""is_public"": True, - ""network_segments"": { + params, r = run_api.ilibrary_edit_island(uuid3, params=params4) + test_assert.status(r, 404) + if 'error' not in rjson2.keys(): + run_api.ilibrary_delete(uuid2, params2) + if 'error' not in rjson3.keys(): + run_api.ilibrary_delete(uuid3, params3) + +" +/ilibrary/rest/edit/{UUID}/,deleting an island library segment which is connected to NIC,"params = { + ""name"": ""test_ilibrary"", + ""machines"": { ""add"": [ { - ""name"": ""test_segment"", - ""enable_ipv4"": True, - ""bridge_ip"": ""192.168.98.0"", - ""network_subnet"": ""255.255.255.0"", - ""enable_dhcp"": True, - ""start_ip"": ""191.168.10.1"", - ""end_ip"": ""191.168.10.150"" + ""uuid"": r1.json()[""uuid""] } ] - } + }, + 'network_segments': { + 'add': [ + { + 'name': 'network1' + } + ] + }, + ""is_public"": False } - params, r = run_api.ilibrary_add_new_island(params=params) - test_assert.status(r, 400) - rjson = r.json() - assert ""start_ip and/or end_ip should lie between inclusive range of"" in rjson['error'] -" -/ilibrary/rest/add/,creating an island library and adding it when provided with NIC id of machine which 
is not part of the current machine,"{ - ""name"": ""test_ilibrary_add_machine_with_other_nic_id"", - ""is_public"": True, +params3 = { ""machines"": { - ""add"": [ + ""update"": [ { - 'name': ""machine"", - ""uuid"": r1.json()[""uuid""], + ""uuid"": machine_uuid, ""nics"": { - ""update"": [ + ""add"": [ { - 'id': second_nic, - ""model"": ""virtio"" + ""model"": ""virtio"", + 'segment': 'network1' } ] } } ] - } + }, + ""is_public"": False }","{ -""status"" : 400, -""message"" : ""The provided nic with id isn't part of this machine"" -}","def test_ilibrary_add_machine_other_nic_id(run_api): + ""status"": 201, + ""response"" : success +}","def test_ilibrary_edit_delete_segment_connected_to_nic(run_api): """""" - Adding Machine with id of NIC which is not part of this machine but some other machine + Editing an Island Library by Deleteing a Segment connected to NIC and check NICs final connection """""" networks = template_networks() if run_api.arch_type == ""aarch64"": params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") - params2, r2 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") else: params1, r1 = run_api.library_add_new_vm(networks=networks) - params2, r2 = run_api.library_add_new_vm(networks=networks) - # first_nic = r1.json()['hw']['networks'][0]['id'] - second_nic = r2.json()['hw']['networks'][0]['id'] params = { - ""name"": ""test_ilibrary_add_machine_with_other_nic_id"", - ""is_public"": True, + ""name"": ""test_ilibrary"", ""machines"": { ""add"": [ { - 'name': ""machine"", - ""uuid"": r1.json()[""uuid""], + ""uuid"": r1.json()[""uuid""] + } + ] + }, + 'network_segments': { + 'add': [ + { + 'name': 'network1' + } + ] + }, + ""is_public"": False + } + + params, r3 = run_api.ilibrary_add_new_island(params=params) + rjson3 = r3.json() + uuid = rjson3['uuid'] + segment_list = rjson3['network_segments'] + + for segment in segment_list: + if segment['name'] == 
'network1': + seg_uuid = segment['uuid'] + + machine_uuid = rjson3['machines'][0]['uuid'] + params3 = { + ""machines"": { + ""update"": [ + { + ""uuid"": machine_uuid, ""nics"": { - ""update"": [ + ""add"": [ { - 'id': second_nic, - ""model"": ""virtio"" + ""model"": ""virtio"", + 'segment': 'network1' } ] } } ] - } + }, + ""is_public"": False } - params, r = run_api.ilibrary_add_new_island(params=params) - test_assert.status(r, 400) + params, r = run_api.ilibrary_edit_island(uuid, params=params3) + nics = r.json()['machines'][0]['hw']['networks'] + for nic in nics: + if nic['segment'] == 'network1': + nic_id = nic['id'] + + params = { + 'network_segments': { + 'delete': [ + { + 'uuid': seg_uuid + } + ] + }, + ""is_public"": False + } + params, r = run_api.ilibrary_edit_island(uuid, params=params) + test_assert.status(r, 201) rjson = r.json() - assert rjson['error'] == ""The provided nic with id ["" + str(second_nic) + ""] isn't part of this machine"" + + nics = rjson['machines'][0]['hw']['networks'] + assert isinstance(nic_id, int) + for nic in nics: + if nic['id'] == nic_id: + assert nic['segment'] is None + + if 'error' not in rjson3.keys(): + run_api.ilibrary_delete(uuid, params3) run_api.library_delete(r1.json()[""uuid""]) - run_api.library_delete(r2.json()[""uuid""]) -" -/ilibrary/rest/add/,creating an island library and adding it when requested with invalid token,"{ -}","{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_ilibrary_add_with_invalid_token(invalid_exec_api): - """""" - Creating an Island Library with invalid token - """""" - params = {} - params, r = invalid_exec_api.ilibrary_add_new_island(params=params) - res = r.json() - test_assert.status(r, 401) - assert res['detail'] == 'Invalid token.' 
-" -/ilibrary/rest/add/,creating an island library and adding it when required fields are not provided,"{ -}","{ -""status"" : 400, -""message"" : ""Required fields should be provided"" -}","def test_ilibrary_add_without_params(run_api): - """""" - Creating an Island Library without params - """""" - params = {} - params, r = run_api.ilibrary_add_new_island(params=params) - test_assert.status(r, 400) - rjson = r.json() - assert rjson['name'] == ['This field is required.'] - assert rjson['is_public'] == ['This field is required.'] " -/ilibrary/rest/add/,creating an island library and adding it when segment with `Default-Public-Segment` name,"{ - ""name"": ""test_ilibrary_add_bigger_start_ip"", +/ilibrary/rest/edit/{UUID}/,adding and updating the segment giving the same name that already exists,"params = { + ""name"": ""test_ilibrary"", ""is_public"": True, - ""network_segments"": { - ""add"": [ + 'network_segments': { + 'add': [ { - 'name': 'Default-Public-Segment', - ""enable_ipv4"": True, - ""bridge_ip"": ""192.168.98.0"", - ""network_subnet"": ""255.255.255.0"", - ""enable_dhcp"": True, - ""start_ip"": ""192.168.98.1"", - ""end_ip"": ""192.168.98.150"" + 'name': 'network1' } ] } + } +params1 = { + 'network_segments': { + 'add': [ + { + 'name': 'network2' + } + ], + 'update': [ + { + 'uuid': seg_id, + 'name': 'network2' + } + ] + }, + ""is_public"": True }","{ -""status"" : 400, -""message"" : ""NetworkSegment name cannot contain any whitespace nor any special characters other than '_' or '-'"" -}","def test_ilibrary_add_default_segmennt_name(run_api): + ""status"": 400, + ""message"": ""Segment name should be unique for an island"" +}","def test_ilibrary_edit_add_and_update_segment_same_name(run_api): """""" - Creating an Island Library with segment name as 'Default Public Segment' + Editing an Island Library add and update segment with same name """""" params = { - ""name"": ""test_ilibrary_add_bigger_start_ip"", + ""name"": ""test_ilibrary"", ""is_public"": 
True, - ""network_segments"": { - ""add"": [ + 'network_segments': { + 'add': [ { - 'name': 'Default-Public-Segment', - ""enable_ipv4"": True, - ""bridge_ip"": ""192.168.98.0"", - ""network_subnet"": ""255.255.255.0"", - ""enable_dhcp"": True, - ""start_ip"": ""192.168.98.1"", - ""end_ip"": ""192.168.98.150"" + 'name': 'network1' } ] } + } params, r = run_api.ilibrary_add_new_island(params=params) - test_assert.status(r, 400) rjson = r.json() - assert rjson['network_segments']['add'][0]['name'] == [""NetworkSegment name cannot contain any whitespace nor any special characters other than '_' or '-'""] - - + uuid = rjson[""uuid""] + segment_list = rjson['network_segments'] + for segment in segment_list: + if segment['name'] == 'network1': + seg_id = segment['uuid'] + params1 = { + 'network_segments': { + 'add': [ + { + 'name': 'network2' + } + ], + 'update': [ + { + 'uuid': seg_id, + 'name': 'network2' + } + ] + }, + ""is_public"": True + } + params, r = run_api.ilibrary_edit_island(uuid, params=params1) + test_assert.status(r, 400) + res = r.json() + assert res['error'] == ""Segment name should be unique for an island"" + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) " -/ilibrary/rest/add/,creating an island library and adding it when start_ip has value greater than that of end_ip,"{ - ""name"": ""test_ilibrary_add_bigger_start_ip"", +/ilibrary/rest/edit/{UUID}/,adding a Segment name which already exists but was deleted,"params = { + ""name"": ""test_ilibrary"", ""is_public"": True, - ""network_segments"": { - ""add"": [ + 'network_segments': { + 'add': [ { - ""name"": ""test_segment"", - ""enable_ipv4"": True, - ""bridge_ip"": ""192.168.98.0"", - ""network_subnet"": ""255.255.255.0"", - ""enable_dhcp"": True, - ""start_ip"": ""192.168.98.150"", - ""end_ip"": ""192.168.98.1"" + 'name': 'network1' } ] } + + } +params1 = { + 'network_segments': { + 'add': [ + { + 'name': 'network1' + } + ], + 'delete': [ + { + 'uuid': 
seg_id, + } + ] + }, + ""is_public"": True }","{ -""status"" : 400, -""message"" : ""end_ip must be higher than start_ip"" -}","def test_ilibrary_add_bigger_start_ip(run_api): + ""status"": 201, + ""response"" : success +}","def test_ilibrary_edit_add_and_delete_segment_same_name(run_api): """""" - Creating an Island Library where start ip is bigger than end ip + Editing an Island Library Add a Segment name which already exists but is being deleted """""" params = { - ""name"": ""test_ilibrary_add_bigger_start_ip"", + ""name"": ""test_ilibrary"", ""is_public"": True, - ""network_segments"": { - ""add"": [ + 'network_segments': { + 'add': [ { - ""name"": ""test_segment"", - ""enable_ipv4"": True, - ""bridge_ip"": ""192.168.98.0"", - ""network_subnet"": ""255.255.255.0"", - ""enable_dhcp"": True, - ""start_ip"": ""192.168.98.150"", - ""end_ip"": ""192.168.98.1"" + 'name': 'network1' } ] } + } params, r = run_api.ilibrary_add_new_island(params=params) - test_assert.status(r, 400) rjson = r.json() - assert rjson['error'] == 'end_ip must be higher than start_ip' + uuid = rjson[""uuid""] + segment_list = rjson['network_segments'] + + for segment in segment_list: + if segment['name'] == 'network1': + seg_id = segment['uuid'] + + params1 = { + 'network_segments': { + 'add': [ + { + 'name': 'network1' + } + ], + 'delete': [ + { + 'uuid': seg_id, + } + ] + }, + ""is_public"": True + } + params, r = run_api.ilibrary_edit_island(uuid, params=params1) + test_assert.status(r, 201) + # res = r.json() + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) + " -/ilibrary/rest/add/,creating an island library and adding it when the segment name for NIC segment is different from what is to being added for this Island,"{ - ""name"": ""test_ilibrary_add_machine_with_other_nic"", +/ilibrary/rest/edit/{UUID}/,adding segment with same name that already exists,"params = { + ""name"": ""test_ilibrary"", ""is_public"": True, - ""machines"": { - 
""add"": [ + 'network_segments': { + 'add': [ + { + 'name': 'network1' + } + ] + }, + } +params1 = { + 'network_segments': { + 'add': [ { - 'name': ""machine"", - ""uuid"": rjson[""uuid""], - ""nics"": { - ""add"": [ - { - ""model"": ""virtio"", - ""segment"": ""Other_segment"" - } - ] - } + 'name': 'network1' } ] - } + }, + ""is_public"": True }","{ -""status"" : 400, -""message"" : ""Provided name of Segment isn't part of this Island"" -}","def test_ilibrary_add_machine_other_nic(run_api, library_add_new_vm): + ""status"": 400, + ""message"": ""The segment name(s) is/are already taken for other island"" +}","def test_ilibrary_edit_add_segment_with_existing_name(run_api): """""" - Add segment name for NIC segment as different from what is to being added for this Island + Editing an Island Library add segment with existing name """""" - params, rjson = library_add_new_vm params = { - ""name"": ""test_ilibrary_add_machine_with_other_nic"", + ""name"": ""test_ilibrary"", ""is_public"": True, - ""machines"": { - ""add"": [ + 'network_segments': { + 'add': [ { - 'name': ""machine"", - ""uuid"": rjson[""uuid""], - ""nics"": { - ""add"": [ - { - ""model"": ""virtio"", - ""segment"": ""Other_segment"" - } - ] - } + 'name': 'network1' } ] - } + }, } params, r = run_api.ilibrary_add_new_island(params=params) - test_assert.status(r, 400) rjson = r.json() - assert rjson['error'] == ""Provided name [Other_segment] of Segment isn't part of this Island"" + uuid = rjson[""uuid""] -" -/ilibrary/rest/add/,creating an island library and adding it when user is unauthorized,"{ - ""name"": ""test_ilibrary_add_required_params"", + params1 = { + 'network_segments': { + 'add': [ + { + 'name': 'network1' + } + ] + }, ""is_public"": True -}","{ + } + params, r = run_api.ilibrary_edit_island(uuid, params=params1) + test_assert.status(r, 400) + res = r.json() + assert res['error'] == ""The segment name(s) {'network1'} is/are already taken for the island"" + if 'error' not in rjson.keys(): + 
uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params) +" +/ilibrary/rest/list/,fetching list of details of island library by an admin user,,"{ +""status"" : 200, +""response"" : success +}","PARAMETERS = [{""dest_obj"": OBJ_ISL}] + + +@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +def test_ilibrary_details_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): + """""" + Details of Ilibrary by Admin + """""" + # Admin check for fetching details of the Ilibrary created by different user. + ilibrary_id = custom_ilib_non_admin_operations + r = run_api.ilibrary_list_island({""uuid"": ilibrary_id}) + test_assert.status(r, 200) + assert r.json()[""count""] == 0 +" +/ilibrary/rest/list/,fetching list of details of ilibrary without token and authorization,,"{ ""status"" : 401, ""message"" : ""Authentication credentials were not provided."" -}","def test_ilibrary_add_without_authorization(anonymous_exec_api): +}","def test_ilibrary_list_without_token(anonymous_exec_api): """""" - Creating an Island Library without authorization + Fetch ilibrary list without token """""" - params = {} - params, r = anonymous_exec_api.ilibrary_add_new_island(params=params) - res = r.json() + r = anonymous_exec_api.ilibrary_list_island() + result = r.json() test_assert.status(r, 401) - assert res['detail'] == 'Authentication credentials were not provided.' 
+ assert result['detail'] == ""Authentication credentials were not provided."" " -/ilibrary/rest/bulk_delete/,sucessful deletion of island library,,"{ -""status"" : 204 -}","def test_ilibrary_bulk_delete(ilibrary_bulk_delete): - """""" - Deleting multiple Island Library - """""" - params, r = ilibrary_bulk_delete - test_assert.status(r, 204) +/ilibrary/rest/list/,fetching list of details of ilibrary without providing any specific params,,"{ +""status"" : 200, +""response"" : success , list provided +}","def test_ilibrary_list_without_params(run_api, ilibrary_add_new_island): + """""""""""" + Lists all the Island Library + """""""""""" + params, r = ilibrary_add_new_island + r = run_api.ilibrary_list_island() + test_assert.status(r, 200) " -/ilibrary/rest/bulk_delete/,"deleting the island library using island_list, where the list is set to null","{ -""island_list"" :None +/ilibrary/rest/list/,fetching list of details of ilibrary with name that does not exist,"{ +name = ""invalid }","{ -""status"":400, -""message"" : ""island_list cannot be null or empty"" -}","def test_ilibrary_bulk_delete_null_island_list(run_api): +""status"" : 200, +""response"" : success , empty list +}","def test_ilibrary_list_with_invalid_name(run_api): """""" - Deleting ilibrary with empty and null island_list + Fetch ilibrary list using invalid name """""" - islands = { - ""island_list"": None - } - r = run_api.ilibrary_bulk_delete(islands) - test_assert.status(r, 400) - rjson = r.json() - assert rjson['error'] == ""island_list cannot be null or empty"", ""|> Json %s"" % rjson + r = run_api.ilibrary_list_island(params={""name"": rand_string() + ""$$""}) + result = r.json() + test_assert.status(r, 200) + assert result[""count""] == 0 " -/ilibrary/rest/bulk_delete/,"deleting the island library using island_list, where the list is set to empty list","{ -""island_list"" :[] -}","{ -""status"":400, -""message"" : ""island_list cannot be null or empty"" -}","def 
test_ilibrary_bulk_delete_empty_list_island_list(run_api): +/ilibrary/rest/list/,"fetching list of details of ilibrary with added filters. Check the user type before performing the operation. +",,"{ +""status"" : 200, +""response"" : success , filtered list +}","def test_ilibrary_list_filter(run_api): """""" - Deleting ilibrary with empty and null island_list - """""" - islands = { - ""island_list"": [] + Getting the lists of Island Library by adding filters + """""" + params, res = [], [] + ilibrary_count = 10 + arch = run_api.arch_type + prefix_name = f""filter_island_1_{rand_string()}_"" + isl_lib_name = [f""{prefix_name}{rand_string()}"" for _ in range(ilibrary_count)] + networks = template_networks() + if arch == ""aarch64"": + params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + params2, r2 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + params3, r3 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") + else: + params1, r1 = run_api.library_add_new_vm(networks=networks) + params2, r2 = run_api.library_add_new_vm(networks=networks) + params3, r3 = run_api.library_add_new_vm(networks=networks) + machine1 = { + ""uuid"": r1.json()[""uuid""], + ""nic_update_id"": r1.json()[""hw""][""networks""][0][""id""], + ""nic_delete_id"": r1.json()[""hw""][""networks""][2][""id""] } - r = run_api.ilibrary_bulk_delete(islands) - test_assert.status(r, 400) - rjson = r.json() - assert rjson['error'] == ""island_list cannot be null or empty"", ""|> Json %s"" % rjson" -/ilibrary/rest/bulk_delete/,"deleting the island library using island_list, where the list is set to empty string","{ -""island_list"" : """" + machine2 = { + ""uuid"": r2.json()[""uuid""], + ""nic_update_id"": r2.json()[""hw""][""networks""][1][""id""], + ""nic_delete_id"": r2.json()[""hw""][""networks""][0][""id""] + } + machine3 = { + ""uuid"": 
r3.json()[""uuid""], + ""nic_update_id"": r3.json()[""hw""][""networks""][2][""id""], + ""nic_delete_id"": r3.json()[""hw""][""networks""][1][""id""] + } + for i in range(ilibrary_count): + param, r = run_api.ilibrary_add_new_island(machine1=machine1, machine2=machine2, + machine3=machine3, name=isl_lib_name[i]) + params.append(param) + res.append(r) + random_int = randint(0, 9) + name_filter = {""name"": res[random_int].json().get(""name""), ""page_size"": ilibrary_count} + uuid_filter = {""uuid"": res[random_int].json().get(""uuid""), ""page_size"": ilibrary_count} + owner_filter = {""owner"": ""colama"" if run_api.user_type == ""admin"" + else ""vivekt"" if run_api.user_type == ""non-admin"" + else ""manager"", ""search"": prefix_name, ""page_size"": ilibrary_count} + island_type_filter = {""island_type"": choice([""private"", ""public""]), ""search"": prefix_name, ""page_size"": ilibrary_count} + filters = [name_filter, uuid_filter, owner_filter, island_type_filter] + exp_res = { + 0: [i.get(""name"") for i in params if i.get(""name"") == name_filter.get(""name"")], + 1: [i.json().get(""uuid"") for i in res if i.json().get(""uuid"") == uuid_filter.get(""uuid"")], + 2: [i.json().get(""owner"") for i in res], + 3: [i.json().get(""island_type"") for i in res if i.json().get(""island_type"") == island_type_filter.get(""island_type"")] + } + for filter in range(len(filters)): + r = run_api.ilibrary_list_island(filters[filter]) + # check for valid response data with the filter parameters + if len(r.json().get(""results"")) != len(exp_res[filter]): + logging.error(f""error in filter: {filters[filter]}, the list of expected result for the filter is: {exp_res[filter]}, and the actual result is {r.json()}"") + assert False + + test_assert.status(r, 200) + run_api.library_delete(r1.json()[""uuid""], params1) + run_api.library_delete(r2.json()[""uuid""], params2) + run_api.library_delete(r3.json()[""uuid""], params3) + for i in range(ilibrary_count): + rjson = 
res[i].json() + if 'error' not in rjson.keys(): + uuid = rjson[""uuid""] + run_api.ilibrary_delete(uuid, params[i]) +" +/ilibrary/rest/list/,fetching list of details of ilibrary when some UUID is provided that does not exist ,"{ +uuid = ""invalid }","{ -""status"":400, -""message"" : ""island_list cannot be null or empty"" -}","def test_ilibrary_bulk_delete_empty_island_list(run_api): +""status"" : 200, +""response"" : success , empty list +}","def test_ilibrary_list_with_invalid_uuid(run_api): """""" - Deleting ilibrary with empty and null island_list + Fetch ilibrary list using invalid uuid """""" - islands = { - ""island_list"": """" - } - r = run_api.ilibrary_bulk_delete(islands) - test_assert.status(r, 400) - rjson = r.json() - assert rjson['error'] == ""island_list cannot be null or empty"", ""|> Json %s"" % rjson" -/ilibrary/rest/bulk_delete/,deleting the island library using invalid data type of island_list,"{ -""island_list"": ""string"" -}","{ -""status"":400, -""message"" : ""Please provide the list of uuids not strings"" -}","def test_ilibrary_bulk_delete_invalid_data_type(run_api): + uid = ""invalid"" + r = run_api.ilibrary_list_island(params={""uuid"": uid}) + test_assert.status(r, 200) + +" +/ilibrary/rest/list/,fetching list of details of ilibrary using the name parameter,,"{ +""status"" : 200, +""response"" : success, list with specific name provided +}","def test_ilibrary_list_with_name(run_api, ilibrary_add_new_island): """""" - Deleting ilibrary with invalid data type island_list + Fetch ilibrary list valid name """""" - islands = { - ""island_list"": ""string"" - } - r = run_api.ilibrary_bulk_delete(islands) - test_assert.status(r, 400) - rjson = r.json() - assert rjson['error'] == ""Please provide the list of uuids not strings"", ""|> Json %s"" % rjson + params, r = ilibrary_add_new_island + lib_name = r.json()[""name""] + result = run_api.ilibrary_list_island(params={""name"": lib_name}) + test_assert.status(result, 200) " 
-/ilibrary/rest/clone/{UUID}/,Clone a Private Island which you are not an owner of but with Admin Rights,"{ - ""name"": ""test_clone"", - ""description"": ""cloning private island by admin whose owner is not admin"" -}","{ - ""status"": 200, - ""response"": island library cloned -}","PARAMETERS = [{""dest_obj"": OBJ_ISL}] - - -@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) -def test_ilibrary_clone_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): +/ilibrary/rest/list/,fetching list of details of ilibrary for some existing UUID,,"{ +""status"" : 200, +""response"" : success , list provided +}","def test_ilibrary_list_with_uuid(run_api, ilibrary_add_new_island): """""" - Creating a clone of an private Island Library by admin whose owner is not admin user + Fetch ilibrary list using uuid """""" - ilib_id = custom_ilib_non_admin_operations - clone = { - ""name"": ""test_clone"", - ""description"": ""cloning private island by admin whose owner is not admin"" - } - params, r = run_api.ilibrary_clone_island(ilib_id, params=clone) - test_assert.status(r, 200) - rjson = r.json() - if 'error' not in rjson.keys(): - uuid = rjson[""uuid""] - run_api.ilibrary_delete(uuid, params) + params, r = ilibrary_add_new_island + ilib_uuid = r.json()[""uuid""] + result = run_api.ilibrary_list_island(params={""uuid"": ilib_uuid}) + test_assert.status(result, 200) " -/ilibrary/rest/clone/{UUID}/,clone a Public Island and check is_public flag on cloned island is False,"{ - ""name"": ""test_ilibrary_clone_public_island"", - ""is_public"": True - }","{ - ""status"": 200, - ""response"": island library cloned -}","def test_ilibrary_clone_public_island(run_api): +/ilibrary/rest/list/,fetching list of details of ilibrary by adding filters on created and update DateTime filter,,"{ +""status"" : 200, +""response"" : success , filtered list provided +}"," +def test_ilibrary_filter_timefilter(run_api: apiops, ilibrary_add_new_island): 
"""""" - Creating a clone of an public Island Library and checking is_public flag on cloned island is False + Filter on created and update DateTime Filter """""" - params = { - ""name"": ""test_ilibrary_clone_public_island"", - ""is_public"": True - } - params, r = run_api.ilibrary_add_new_island(params=params) + template, r = ilibrary_add_new_island rjson = r.json() - uuid = rjson[""uuid""] - clone = { - ""name"": ""test_clone"", - ""description"": ""cloning private island by admin whose owner is not admin"" - } - params, r = run_api.ilibrary_clone_island(uuid, params=clone) - rjson1 = r.json() - test_assert.status(r, 200) - assert rjson1['is_public'] is False - if 'error' not in rjson1.keys(): - uuid = rjson1[""uuid""] - run_api.ilibrary_delete(uuid, params) - if 'error' not in rjson.keys(): - uuid = rjson[""uuid""] - run_api.ilibrary_delete(uuid, params) + ilib_id = rjson[""uuid""] + # utime and ctime is returned in the form '2023-09-14T17:59:39.173594Z' which needs conversion to '2023-09-14 17:59:39.173594' + str_ctime = rjson['ctime'].replace('T', ' ').replace('Z', '') + datetime_ctime = convert_datetime_stringform(rjson['ctime']) + + def handle_trigger_delay_filtering_for_created_on(created_start_date, created_end_date, created_date_range, utc=True): + """""" + Function to handle corner case if ilibrary image was created a day before and test get triggered on new day + """""" + if not utc: + created_start_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + created_end_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": created_start_date, ""created_end_date"": created_end_date, + ""created_date_range"": created_date_range, ""page_size"": 1}).json()['count'] == 1 + # Filter on UTC time + # .... 
When the datetime is selected to be the same as in detail + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": str_ctime, + ""created_end_date"": str_ctime, ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""page_size"": 1}).json()['count'] == 0 + # ........ When the datetime is selected a 1 microsecond less than from the detail + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), + ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""page_size"": 1}).json()['count'] == 1 + # ........ 
When the datetime is selected a 1 microsecond less than from the detail + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), + ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date' and 'created_end_date' when passed blank string + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": """", ""created_end_date"": """", ""page_size"": 1}).json()['count'] == 1 + # ........Filter on 'created_start_date' and 'created_end_date' when created_start_date is greater than created_end_date + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. 
+ # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'today + try: + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'yesterday + try: + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'week + try: + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on week's last day at 23:59:59.9999999 and test get triggered on new week at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'month + try: + 
assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on month's last day at 23:59:59.9999999 and test get triggered on new month at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'year' + try: + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on year last day at 23:59:59.9999999 and test get triggered on new year at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # .........When the created_date_range format is invalid + response = run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""invalid"", ""page_size"": 1}) + test_assert.status(response, 400) + assert response.json()['created_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() + # .........When the created_start_date and created_end_date has whitespaces in them + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": "" "" + str_ctime + "" "", ""created_end_date"": "" "" + str_ctime + "" "", ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + + # Filter on IST time + # .... 
When the datetime is selected to be the same as in detail but having TimeZone of +0530 + # ........ When the datetime is selected to be the same as in detail but with tzone IST, i.e., timedelta being 19800 equivalent to +05:30 + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 + # ........ When the datetime is selected to be the same as in detail but with tzone IST, i.e., timedelta being 19800 equivalent to +05:30 + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 0 + # ........ When the datetime is selected to be the same as in detail but with tzone IST, i.e., timedelta being 19800 equivalent to +05:30 but increase microsecond by 1 to fail filter + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 1 + # ........ 
When the datetime is selected a 1 microsecond less than from the detail + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'today + try: + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'yesterday + try: + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 IST and test get triggered at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are 
same as in the detail and 'created_date_range' is passed as'week + try: + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on week's last day at 23:59:59.9999999 IST and test get triggered on new week at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'month + try: + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on month's last day at 23:59:59.9999999 IST and test get triggered on new month at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'year' + try: + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""year"", ""page_size"": 
1}).json()['count'] == 1 + except AssertionError: + # when machine is created on year last day at 23:59:59.9999999 IST and test get triggered on new year at 00:00:00.0000000 IST + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # .........When the created_date_range format is invalid + response = run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""invalid"", ""page_size"": 1}) + test_assert.status(response, 400) + assert response.json()['created_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() + # .........When the created_start_date and created_end_date has whitespaces in them + assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", ""created_end_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", + ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 " -/ilibrary/rest/clone/{UUID}/,creating a clone of an ilibrary with name but no description,"{ - ""name"": ""test_clone"" -}","{ - ""status"": 200, - ""response"": island library cloned -}","def test_ilibrary_clone_with_name_only(run_api, ilibrary_add_new_island): +/ilibrary/rest/list/,fetching list of details of ilibrary by a non-admin user,,"{ +""status"" : 200, +""response"" : success +}","@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_ilibrary_list_non_admin(skip_if_admin, custom_ilib_admin_operations, run_api): """""" - Creating a clone of an Island 
Library with name only + Details of Ilibrary by non-Admin """""" - params1, r1 = ilibrary_add_new_island - uuid = r1.json()[""uuid""] - clone = { - ""name"": ""test_clone"" - } - params, r = run_api.ilibrary_clone_island(uuid, params=clone) + # Non-admin check for fetching details of the Ilibrary created by different user. + ilibrary_id = custom_ilib_admin_operations + r = run_api.ilibrary_list_island({""uuid"": ilibrary_id}) test_assert.status(r, 200) - rjson = r.json() - if 'error' not in rjson.keys(): - uuid = rjson[""uuid""] - run_api.ilibrary_delete(uuid, params) - + assert r.json()[""count""] == 0 " -/ilibrary/rest/clone/{UUID}/,creating a clone of an ilibrary without name and description,"{ -}","{ - ""status"": 400, - ""response"": field required -}","def test_ilibrary_clone_without_name_and_description(run_api, ilibrary_add_new_island): +/ilibrary/rest/list/,fetching list of details of ilibrary,,"{ +""status"" : 200, +""response"" : success +}","def test_ilibrary_list(ilibrary_list): """""" - Creating a clone of an Island Library without name and without description + Getting the lists of Island Library """""" - params1, r1 = ilibrary_add_new_island - uuid = r1.json()[""uuid""] - params, r = run_api.ilibrary_clone_island(uuid, params={}) - test_assert.status(r, 400) - rjson = r.json() - assert rjson['name'] == ['This field is required.'] + r = ilibrary_list + # test_assert.status(r, template, ""library_list"", ""name"") + test_assert.status(r, 200) " -/ilibrary/rest/clone/{UUID}/,creating a clone of an Island Library,,"{ +/ilibrary/rest/revisions/,getting the list of revisions in island library ,,"{ ""status"": 200, - ""response"": island library cloned -}","def test_ilibrary_clone(run_api, ilibrary_clone_island): + ""response"": revision list provided +}","def test_ilibrary_revisions(ilibrary_revisions): """""" - Creating a clone of an Island Library + Getting the lists of revisions in Island Library """""" - params, r = ilibrary_clone_island - result = 
r.json() - test_assert.status(params, result, ""ilibrary_clone"") + r = ilibrary_revisions test_assert.status(r, 200) " -/ilibrary/rest/clone/{UUID}/,creating a clone of an Island Library by non-admin user,"{ - ""name"": ""test_clone"", - ""description"": ""cloning private island without admin rights"" - }","{ - ""status"": 403 -}","PARAMETERS = [{""dest_obj"": OBJ_ISL}] - -@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) -def test_ilibrary_clone_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): - """""" - Creating a clone of an private Island Library without admin rights whose owner is not current user - """""" - ilib_id = custom_ilib_admin_operations - clone = { - ""name"": ""test_clone"", - ""description"": ""cloning private island without admin rights"" +/ilibrary/rest/revisions/,getting the list of revisions in ilibrary with filters,"FILTERS = [ + { + 'page': 1 + }, + { + 'page_size': 1 + }, + { + 'page': 1, + 'page_size': 1 } - params, r = run_api.ilibrary_clone_island(ilib_id, params=clone) - test_assert.status(r, 403) -" -/ilibrary/rest/clone/{UUID}/,creating a clone of an Island Library when Island UUID does not exist,"{ - uuid = 'invalid-ilibrary-uuid' -}","{ - ""status"": 404, - ""response"": not found -}","def test_ilibrary_clone_invalid_uuid(run_api): +]","{ + ""status"": 200, + ""response"": revision list provided +}","FILTERS = [ + { + 'page': 1 + }, + { + 'page_size': 1 + }, + { + 'page': 1, + 'page_size': 1 + } +] + + +@pytest.mark.parametrize('filter', FILTERS) +def test_ilibrary_revisions_page_num(run_api, ideploy_deploy, filter): """""" - Creating a clone of an Island Library with invalid uuid + Getting the lists of revisions in Island Library adding filters """""" - uuid = 'invalid-ilibrary-uuid' - params, r = run_api.ilibrary_clone_island(uuid) - test_assert.status(r, 404) + params, r = ideploy_deploy + x = r.json() + deploy_id = x[""deploy_uuid""] + r, rtask_details = 
run_api.ideploy_snapshot(deploy_id) + snapshot_id = rtask_details['result']['snapshotted_island_uuid'] + r = run_api.ilibrary_revisions(snapshot_id, filter) + test_assert.status(r, 200) + run_api.ilibrary_delete(snapshot_id, {}) " -/ilibrary/rest/clone/{UUID}/,creating a clone of an Island Library when requested with invalid token,"{ - uuid = 'invalid-ilibrary-uuid' +/ilibrary/rest/revisions/,getting the list of revisions in ilibrary when requested using invalid token,"{ + uuid = 'invalid-island-library-uuid' }","{ ""status"" : 401, ""message"" : ""Invalid token"" -}","def test_ilibrary_clone_with_invalid_token(invalid_exec_api): +}","def test_ilibrary_revisions_invalid_token(invalid_exec_api): """""" - Creating a clone of an Island Library with invalid token + Getting the lists of revisions in Island Library with invalid token """""" - uuid = 'invalid-ilibrary-uuid' - params, r = invalid_exec_api.ilibrary_clone_island(uuid) + uuid = 'invalid-island-library-uuid' + r = invalid_exec_api.ilibrary_revisions(uuid) test_assert.status(r, 401) res = r.json() - assert res['detail'] == ""Invalid token."" -" -/ilibrary/rest/clone/{UUID}/,creating a clone of an Island Library with some name and empty description,"{ - ""name"": ""test_clone"", - ""description"": """" - }","{ - ""status"": 200, - ""response"": island library cloned -}","def test_ilibrary_clone_with_name_empty_description(run_api, ilibrary_add_new_island): - """""" - Creating a clone of an Island Library with name and empty description - """""" - params1, r1 = ilibrary_add_new_island - uuid = r1.json()[""uuid""] - clone = { - ""name"": ""test_clone"", - ""description"": """" - } - params, r = run_api.ilibrary_clone_island(uuid, params=clone) - test_assert.status(r, 200) - rjson = r.json() - if 'error' not in rjson.keys(): - uuid = rjson[""uuid""] - run_api.ilibrary_delete(uuid, params) - + assert res['detail'] == 'Invalid token.' 
" -/ilibrary/rest/clone/{UUID}/,creating a clone of an Island Library without Authorization,"{ - uuid = 'valid-ilibrary-uuid' +/ilibrary/rest/revisions/,getting the list of revisions in ilibrary without Authorization,"{ + uuid = 'valid-island-library-uuid' }","{ ""status"" : 401, ""message"" : ""Authentication credentials were not provided."" -}","def test_ilibrary_clone_without_authorization(anonymous_exec_api): +}","def test_ilibrary_revisions_without_authorization(anonymous_exec_api): """""" - Creating a clone of an Island Library without authorization + Getting the lists of revisions in Island Library without authorization """""" - uuid = 'valid-ilibrary-uuid' - params, r = anonymous_exec_api.ilibrary_clone_island(uuid) + uuid = 'valid-island-library-uuid' + r = anonymous_exec_api.ilibrary_revisions(uuid) test_assert.status(r, 401) res = r.json() - assert res['detail'] == ""Authentication credentials were not provided."" + assert res['detail'] == 'Authentication credentials were not provided.' " -/ilibrary/rest/delete/{UUID}/,deleting a Island by an non-admin user who does not own the library,"{ - uuid = 'valid-existing-island-library-uuid' +/ilibrary/rest/revisions/,getting the list of revisions in ilibrary when Island UUID does not exist,"{ + uuid = 'invalid-island-library-uuid' }","{ - ""status"": 403, - ""response"": unauthorized -}","@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) -def test_ilib_delete_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): + ""status"": 404, + ""response"": not found +}","def test_ilibrary_revisions_invalid_uuid(run_api): """""" - Deleting the private Ilibrary by non-Admin + Getting the lists of revisions in Island Library with invalid uuid """""" - # Non-admin check for deleting the private Ilibrary created by different user. 
- ilib_id = custom_ilib_admin_operations - r = run_api.ilibrary_delete(ilib_id, {}) - test_assert.status(r, 403) + uuid = 'invalid-island-library-uuid' + r = run_api.ilibrary_revisions(uuid) + test_assert.status(r, 404) " -/ilibrary/rest/delete/{UUID}/,deleting a Island by manager,"{ - uuid = 'valid-existing-island-library-uuid' -}","{ - ""status"": 403 -}","endpoint = ""ilibrary_delete"" - -PARAMETERS = [{""dest_obj"": OBJ_ISL}] - -@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) -@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) -def test_ilib_delete_manager(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): +/library/rest/add,adding vm to library without Authorization,,"{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_add_vm_to_library_without_authorization(anonymous_exec_api): """""" - Delete the Ilibrary by Manager + without authorization """""" - # When the user is not part of the group that the manager manages - ilib_id = custom_ilib_admin_operations - r = run_api.ilibrary_delete(ilib_id, {}) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False)) - # When the user is part of the group that the manager manages - ilib_id = custom_ilib_non_admin_operations - r = run_api.ilibrary_delete(ilib_id, {}) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True)) + params, response = anonymous_exec_api.library_add_new_vm(noraise=True) + test_assert.status(response, 401) + rjson = response.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) " -/ilibrary/rest/delete/{UUID}/,deleting a Private Island by an admin user where the admin does not own the island,"{ - uuid = 'valid-existing-island-library-uuid' -}","{ - ""status"": 204, - ""response"": Island library deleted 
-}","PARAMETERS = [{""dest_obj"": OBJ_ISL}] +/library/rest/add,adding vm to library when requested with invalid token,,"{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_add_vm_to_library_invaild_token(invalid_exec_api): + """""" + invalid Token + """""" -@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) -def test_ilib_delete_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): + params, response = invalid_exec_api.library_add_new_vm(noraise=True) + test_assert.status(response, 401) + rjson = response.json() + assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) +" +/library/rest/add,adding vm to library when provided with valid data,,"{ + ""status"": 201, + ""response"": Machine details +} +","def test_add_vm_to_library_with_vaild_data(run_api,): """""" - Deleting the private Ilibrary by Admin + When provided with valid data """""" - # Admin check for deleting the private Ilibrary created by different user. 
- ilib_id = custom_ilib_non_admin_operations - r = run_api.ilibrary_delete(ilib_id, {}) - test_assert.status(r, 204) + params, response = run_api.library_add_new_vm() + test_assert.status(response, 201) + UUID = response.json()[""UUID""] + run_api.library_delete(UUID, {}) " -/ilibrary/rest/delete/{UUID}/,deleting a public Island by an non-admin user who does not own the library,"{ - uuid = 'valid-existing-island-library-uuid' -}","{ - ""status"": 403, - ""response"": unauthorized -}","def test_public_ilib_delete_non_admin(skip_if_not_non_admin, run_api, admin_exec_api, non_admin_exec_api): +/library/rest/add,adding vm to library when multiple bootable cds and same boot order is passed,"cdrom = [{ ""type"": ""sata"", ""iso"": """", ""is_boot"": True, ""boot_order"": 1 }, { ""type"": ""sata"", ""iso"": """", ""is_boot"": True, ""boot_order"": 1 }]","{ +""status"" : 400, +""response"" : Bad request +}","def test_add_vm_to_library_multiple_bootable_cds_with_same_boot_order(run_api): + """""" + If multiple bootable cds with same boot order is passed + """""" + cdrom = [{ + ""type"": ""sata"", + ""iso"": """", + ""is_boot"": True, + ""boot_order"": 1 + }, + { + ""type"": ""sata"", + ""iso"": """", + ""is_boot"": True, + ""boot_order"": 1 + }] + + params, response = run_api.library_add_new_vm(cdrom=cdrom, noraise=True) + test_assert.status(response, 400) +" +/library/rest/add,"adding vm to library when machine name contains ""#""","{ 'name': newtxt, 'noraise': True }","{ + ""status"": 401, + ""message"": ""Name cannot contain '/' or '#"" +} +","def test_add_vm_to_library_with_name_contains_hash(run_api): """""" - Deleting the public Ilibrary by Non-Admin + if machine name contains ""#"" """""" - # Non-Admin check for deleting the public Ilibrary created by different user. 
- networks = template_networks() - params, r_lib = admin_exec_api.library_add_new_vm(networks=networks) - rjson_lib = r_lib.json() - machine = { - ""uuid"": rjson_lib[""uuid""], - ""nic_update_id"": rjson_lib[""hw""][""networks""][0][""id""], - ""nic_delete_id"": rjson_lib[""hw""][""networks""][2][""id""] - } - island = template_add_ilibrary_one_machine(machine=machine) - island['is_public'] = True - params, r_isl = admin_exec_api.ilibrary_add_new_island(params=island) - rjson_isl = r_isl.json() - ilib_id = rjson_isl[""uuid""] - r = run_api.ilibrary_delete(ilib_id, {}) - test_assert.status(r, 403) - if 'error' not in rjson_isl.keys(): - uuid = rjson_isl[""uuid""] - admin_exec_api.ilibrary_delete(uuid, params) - if 'error' not in rjson_lib.keys(): - uuid = rjson_lib[""uuid""] - admin_exec_api.library_delete(uuid, params) + txt = rand_string() + random_index = random.randint(0, len(txt)) + newtxt = txt[:random_index] + random.choice(['#', '/']) + txt[random_index:] + kwargs = { + 'name': newtxt, + 'noraise': True + } + params, response = run_api.library_add_new_vm(**kwargs) + test_assert.status(response, 400) + rjson = response.json() + assert rjson[""error""] == ""Name cannot contain '/' or '#"", ""The error message is {}"".format(rjson[""error""]) " -/ilibrary/rest/delete/{UUID}/,deleting a public Island by user with Admin rights but not owner of the library,"{ - uuid = 'valid-existing-island-library-uuid' -}","{ - ""status"": 204, - ""response"": Island library deleted -}","def test_public_ilib_delete_admin(skip_if_not_admin, run_api, non_admin_exec_api): +/library/rest/add,adding vm to library when disks of IDE type are passed with is_uefi set to True,"disks = [{""size"": 20, ""port"": ""hda"", ""type"": ""ide"", ""format"": ""qcow2"", ""is_boot"": False}] +","{ +""status"" : 400, +""response"" : Bad request +}"," +def test_add_vm_to_library_ide_type_passed_with_uefi_true(run_api): """""" - Deleting the public Ilibrary by Admin + if ide type passed with uefi true 
"""""" - # Admin check for deleting the public Ilibrary created by different user. - networks = template_networks() - params, r_lib = non_admin_exec_api.library_add_new_vm(networks=networks) - rjson_lib = r_lib.json() - - machine = { - ""uuid"": rjson_lib[""uuid""], - ""nic_update_id"": rjson_lib[""hw""][""networks""][0][""id""], - ""nic_delete_id"": rjson_lib[""hw""][""networks""][2][""id""] - } - island = template_add_ilibrary_one_machine(machine=machine) - island['is_public'] = True - params, r_isl = non_admin_exec_api.ilibrary_add_new_island(params=island) - rjson_isl = r_isl.json() - ilib_id = rjson_isl[""uuid""] - r = run_api.ilibrary_delete(ilib_id, {}) - test_assert.status(r, 204) - if 'error' not in rjson_lib.keys(): - uuid = rjson_lib[""uuid""] - non_admin_exec_api.library_delete(uuid, params) + disks = [{""size"": 20, ""port"": ""hda"", ""type"": ""ide"", ""format"": ""qcow2"", ""is_boot"": False}] + params, response = run_api.library_add_new_vm(disks=disks, noraise=True, is_uefi=True) + test_assert.status(response, 400) " -/ilibrary/rest/delete/{UUID}/,deleting an Island which has existing deployments,"{ - uuid = 'valid-existing-island-library-uuid' +/library/rest/adddisk/{{UUID}}/ ,adding disk to library without Authorization,"{ +lib_id = ""doesnotexits"" }","{ - ""status"": 400, - ""response"": failure -}","def test_ilib_delete_deployed(run_api, ilibrary_add_new_island): +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_lib_add_disk_without_authorization(anonymous_exec_api): """""" - Delete Island which has existing deployments + without authorization """""" - template, r = ilibrary_add_new_island - isl_uuid = r.json()[""uuid""] - r = run_api.ideploy_deploy(isl_uuid) - deploy_id = r.json()[""deploy_uuid""] - r = run_api.ilibrary_delete(isl_uuid, {}) - test_assert.status(r, 400) - run_api.ideploy_delete(deploy_id) + lib_id = ""doesnotexits"" + r = anonymous_exec_api.library_add_disk(lib_id) + 
test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) " -/ilibrary/rest/delete/{UUID}/,deleting an Island which has next revisions,"{ - uuid = 'valid-existing-island-library-uuid' +/library/rest/adddisk/{{UUID}}/ ,adding disk to library when requested with invalid token,"{ +lib_id = ""doesnotexits"" }","{ - ""status"": 400, - ""response"": failure -}","def test_ilib_delete_with_next_revision(run_api, ilibrary_add_new_island): +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_lib_add_disk_with_invalid_token(invalid_exec_api): """""" - Delete Island which has next revision + with invalid token """""" - template, r = ilibrary_add_new_island - isl_uuid = r.json()[""uuid""] - r = run_api.ideploy_deploy(isl_uuid) - deploy_id = r.json()[""deploy_uuid""] - r, rtask_details = run_api.ideploy_snapshot(deploy_id) - run_api.ideploy_delete(deploy_id) - r = run_api.ilibrary_delete(isl_uuid, {}) - test_assert.status(r, 400) - run_api.ilibrary_delete(rtask_details['result']['snapshotted_island_uuid'], {}) - r = run_api.ilibrary_delete(isl_uuid, {}) + lib_id = ""doesnotexits"" + r = invalid_exec_api.library_add_disk(lib_id) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) " -/ilibrary/rest/delete/{UUID}/,deleting island library for existing valid data,,"{ - ""status"": 204, - ""response"": Island library deleted -}","def test_ilib_delete(ilibrary_delete): +/library/rest/adddisk/{{UUID}}/ ,adding disk to library When provided correct UUID and correct data,,"{ +""status"" : 201, +""response"" : Disks should be added to lib +}"," +PARAMETERS = [{""dest_obj"": OBJ_LIB}] + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_lib_add_disk_admin(skip_if_not_admin, custom_lib_non_admin_operations, 
run_api): """""" - Deleting the Ilibrary + adding disk to a library by Admin """""" - r = ilibrary_delete - test_assert.status(r, 204) + # Admin check for adding disk to a library created by different user. + lib_id = custom_lib_non_admin_operations + r = run_api.library_add_disk(lib_id) + test_assert.status(r, 201) " -/ilibrary/rest/delete/{UUID}/,deleting island library when Island UUID does not exist,"{ - uuid = 'invalid-island-library-uuid' -}","{ - ""status"": 404, - ""message"": ""Not found"" -}","def test_ilib_delete_invalid_uuid(run_api): +/library/rest/adddisk/{{UUID}}/ ,adding disk to library if tried to add IDE type disks for UEFI enabled library,"params = { + 'type': 'ide', + 'port': 'hdc' + }","{ +""status"" : 400 +}","def test_lib_add_disk_with_uefi_enabled(run_api, ): + lib_params, r = run_api.library_add_new_vm(noraise=True, is_uefi=True) + lib_UUID = r.json()[""UUID""] + params = { + 'type': 'ide', + 'port': 'hdc' + } + r = run_api.library_add_disk(lib_UUID, params) + test_assert.status(r, 400) + run_api.library_delete(lib_UUID, lib_params) +" +/library/rest/adddisk/{{UUID}}/ ,adding disk to library if tried to add a disk with same boot order as existing disk,"params = { + 'boot_order': 1 + }","{ +""status"" : 400, +""response"" : Bad Request +}","def test_lib_add_disk_with_same_boot_order(run_api, library_add_new_vm): + lib_params, r = library_add_new_vm + lib_UUID = r[""UUID""] + params = { + 'boot_order': 1 + } + r = run_api.library_add_disk(lib_UUID, params) + test_assert.status(r, 400) +" +/library/rest/boot types/,getting boot type list when Requested,,"{ +""status"" : 200, +""response"" : Boot type list +}","def test_library_boottypes(run_api): """""" - Delete Island with invalid uuid + Getting the list of Boot type """""" - r = run_api.ilibrary_delete(""invalid-island-uuid"", {}) - test_assert.status(r, 404) - assert res['detail'] == 'Not found.' 
+ r = run_api.library_boottypes() + result = r.json() + test_assert.status(result, LIBRARY_BOOT_TYPE, ""library_boottypes"") + test_assert.status(r, 200) " -/ilibrary/rest/delete/{UUID}/,deleting island library when requested with invalid token,"{ - uuid = 'invalid-island-library-uuid' -}","{ +/library/rest/boottypes/,getting boot type list when requested with invalid token,,"{ ""status"" : 401, ""message"" : ""Invalid token"" -}","def test_ilib_delete_invalid_token(invalid_exec_api): +}","def test_library_boottypes_with_invalid_token(invalid_exec_api): """""" - Delete Island with invalid token + Getting the list of Boot type when invalid token provided """""" - r = invalid_exec_api.ilibrary_delete(""invalid-island-uuid"", {}) + r = invalid_exec_api.library_boottypes() test_assert.status(r, 401) - res = r.json() - assert res['detail'] == 'Invalid token.' + rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""|> The Error is {}"".format(rjson['detail']) " -/ilibrary/rest/delete/{UUID}/,deleting island library without Authorization,"{ - uuid = 'valid-island-library-uuid' -}","{ +/library/rest/bulkdelete/,deployment of deletion of machines in bulk without Authorization,"machine = { ""machine_list"": [] }","{ ""status"" : 401, -""message"" : ""Authentication credentials were not provided."" -}","def test_ilib_delete_without_authentication(anonymous_exec_api): +""message"" : ""Authentication credentials were not provided"" +}","def test_library_bulk_delete_without_authorization(anonymous_exec_api): """""" - Delete Island without authorization + without authorization """""" - r = anonymous_exec_api.ilibrary_delete(""valid-island-uuid"", {}) - test_assert.status(r, 401) - res = r.json() - assert res['detail'] == 'Authentication credentials were not provided.' 
+ machine = { + ""machine_list"": [] + } + res = anonymous_exec_api.library_bulkdelete(machine) + + test_assert.status(res, 401) + rjson = res.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The Error is {}"".format(rjson['detail']) " -/ilibrary/rest/details/{UUID}/,fetching details of island library,,"{ - ""status"": 200, - ""response"" : success -}","def test_ilibrary_details(ilibrary_details): +/library/rest/bulkdelete/,deployment of deletion of machines in bulk when requested with invalid token,"machine = { ""machine_list"": [] }","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_library_bulk_delete_with_invalid_token(invalid_exec_api): + """""" + Invalid token + """""" + machine = { + ""machine_list"": [] + } + res = invalid_exec_api.library_bulkdelete(machine) + + test_assert.status(res, 401) + rjson = res.json() + assert rjson['detail'] == ""Invalid token."", ""|> The Error is {}"".format(rjson['detail']) +" +/library/rest/bulkdelete/,deployment of deletion of machines in bulk when requested with invalid token,"machine = { ""machine_list"": ['invalid'] }","{ +""status"" : 400, +""message"" : Machine matching query does not exist."" +}","def test_library_bulk_delete_invalid_id(run_api): """""" - Getting Ilibrary details + provide invalid machine id """""" - r = ilibrary_details - test_assert.status(r, 200) + machine = { + ""machine_list"": ['invalid'] + } + res = run_api.library_bulkdelete(machine) + + test_assert.status(res, 400) + rjson = res.json() + assert rjson['failure'][0]['error'] == ""Machine matching query does not exist."", ""|> The Error is {}"".format(rjson) " -/ilibrary/rest/details/{UUID}/,fetching details of island library by admin user,"{ -ilib_id -}","{ - ""status"": 200, - ""response"": success -}","@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) -def test_ilibrary_details_admin(skip_if_not_admin, custom_ilib_non_admin_operations, 
run_api): +/library/rest/bulkdelete/,deployment of deletion of machines in bulk when passed a list UUIDs of all deletable machines,,"{ +""status"" : 204, +""response"" : ""Machine deleted successfully"" +}","def test_library_bulk_delete(library_bulkdelete): """""" - Details of Ilibrary by Admin + Deleting multiple VM's """""" - # Admin check for fetching details of the Ilibrary created by different user. - ilibrary_id = custom_ilib_non_admin_operations - r = run_api.ilibrary_details(ilibrary_id) - test_assert.status(r, 200) + params, r = library_bulkdelete + test_assert.status(r, 204) " -/ilibrary/rest/details/{UUID}/,fetching details of island library by manager,"{ -ilib_id -}",,"endpoint = ""ilibrary_details"" -networks = template_networks() - -PARAMETERS = [{""dest_obj"": OBJ_ISL}] -@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) -@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) -def test_ilibrary_details_manager(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): +/library/rest/bulkdelete/,deployment of deletion of machines in bulk when empty list of UUIDs is passed ,"machine = { ""machine_list"": [] }","{ +""status"" : 400 +}","def test_library_bulk_delete_with_empty_list(run_api): """""" - Details of Ilibrary by Manager + When empty list is passed """""" - # When the user is not part of the group that the manager manages - ilibrary_id = custom_ilib_admin_operations - r = run_api.ilibrary_details(ilibrary_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False)) + machine = { + ""machine_list"": [] + } + res = run_api.library_bulkdelete(machine) - # When the user is part of the group that the manager manages - ilibrary_id = custom_ilib_non_admin_operations - r = run_api.ilibrary_details(ilibrary_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True)) + test_assert.status(res, 400) + rjson = 
res.json() + assert rjson['error'] == ""machine_list cannot be null or empty"", ""|> Json %s"" % rjson " -/ilibrary/rest/details/{UUID}/,fetching details of island library by non-admin user,"{ -lib_id +/library/rest/clone/{{UUID}}/,cloning library without Authorization,"{ +UUID = 'doesnotexits' }","{ - ""status"": 403, - ""message"": ""You do not have permission to perform this action."" -}","PARAMETERS = [{""dest_obj"": OBJ_ISL}] - -@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) -def test_ilibrary_details_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_library_clone_without_authorization(anonymous_exec_api): """""" - Details of Ilibrary by non-Admin + clone without authorization """""" - # Non-admin check for fetching details of the Ilibrary created by different user. - ilibrary_id = custom_ilib_admin_operations - r = run_api.ilibrary_details(ilibrary_id) - test_assert.status(r, 403) - assert r.json()[""error""] == ""You do not have permission to perform this action."" + + UUID = 'doesnotexits' + clone_params, clone_r = anonymous_exec_api.library_clone_vm(UUID) + test_assert.status(clone_r, 401) + rjson = clone_r.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The Error is {}"".format(rjson['detail']) " -/ilibrary/rest/details/{UUID}/,fetching details of island library provided with invalid UUID,"{ - uid = ""invalid"" +/library/rest/clone/{{UUID}}/,cloning library when requested with invalid token,"{ +UUID = 'doesnotexits' }","{ - ""status"": 404, - ""message"": ""Not Found"" -}","def test_ilibrary_details_with_invalid_uuid(run_api): +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_library_clone_invaild_token(invalid_exec_api): """""" - Details of Ilibrary with invalid uuid + clone request with invalid token """""" - uid = ""invalid"" - r = 
run_api.ilibrary_details(uuid=uid) - test_assert.status(r, 404) + + UUID = 'doesnotexits' + clone_params, clone_r = invalid_exec_api.library_clone_vm(UUID) + test_assert.status(clone_r, 401) + rjson = clone_r.json() + assert rjson['detail'] == ""Invalid token."", ""|> The Error is {}"".format(rjson['detail']) " -/ilibrary/rest/details/{UUID}/,fetching details of island library provided with valid UUID,"{ -ilib_id +/library/rest/clone/{{UUID}}/,cloning library when Provided with machine UUID that does not exist,"{ +UUID = 'doesnotexits' }","{ - ""status"": 200, - ""response"" : success -}","def test_ilibrary_details_with_valid_uuid(run_api, ilibrary_add_new_island): - """""" - Details of Ilibrary with valid uuid - """""" - params, r = ilibrary_add_new_island - lib_uuid = r.json()[""uuid""] - result = run_api.ilibrary_list_island(params={""uuid"": lib_uuid}) - x = result.json() - test_assert.status(result, 200) - for island_lib in x['results']: - assert island_lib['uuid'] == lib_uuid, ""Json is %s"" % x -" -/ilibrary/rest/details/{UUID}/,fetching details of island library when all machines are assigned with multiple NICs and island type is public,"{ - ""name"": ""Machine1"", - ""is_public"": True, - ""machines"": { - ""add"": [machine1, machine2, machine3], - }, - ""network_segments"": { - ""add"": [ - { - ""name"": ""Seg1"", - ""description"": ""string"", - ""enable_ipv4"": False - }, - { - ""name"": ""Seg2"", - ""description"": ""string"", - ""enable_ipv4"": False - }, - { - ""name"": ""Seg3"", - ""description"": ""string"", - ""enable_ipv4"": False - } - ] - } - }","{ - ""response"" : success -}","def test_ilibrary_details_with_island_type_public_with_three_segement(run_api, library_add_three_vm): +""status"" : 404, +""message"" : ""Clone : Machine not found"" +}","def test_library_clone_with_wrong_machine_UUID(library_add_new_vm, run_api): """""" - Detail of island_type when island all machines are assigned with multiple NICs and island type is public + when 
Provided machine UUID does not exist """""" - r1, r2, r3 = library_add_three_vm - machine1 = { - ""uuid"": r1.json()[""uuid""], - ""nics"": { - ""add"": [ - { - ""mac"": ""auto"", - ""type"": ""bridge"", - ""model"": networks[0].get(""model"", ""virtio""), - ""segment"": ""Default Public Segment"" - }, - { - ""mac"": ""auto"", - ""type"": ""bridge"", - ""model"": networks[0].get(""model"", ""virtio""), - ""segment"": ""Seg1"" - } - ], - } - - } - machine2 = { - ""uuid"": r2.json()[""uuid""], - ""nics"": { - ""add"": [ - { - ""mac"": ""auto"", - ""type"": ""bridge"", - ""model"": networks[0].get(""model"", ""virtio""), - ""segment"": ""Seg1"" - }, - { - ""mac"": ""auto"", - ""type"": ""bridge"", - ""model"": networks[0].get(""model"", ""virtio""), - ""segment"": ""Seg2"" - } - ], - } - - } - machine3 = { - ""uuid"": r3.json()[""uuid""], - ""nics"": { - ""add"": [ - { - ""mac"": ""auto"", - ""type"": ""bridge"", - ""model"": networks[0].get(""model"", ""virtio""), - ""segment"": ""Seg2"" - }, - { - ""mac"": ""auto"", - ""type"": ""bridge"", - ""model"": networks[0].get(""model"", ""virtio""), - ""segment"": ""Seg3"" - } - ], - } - - } - params = { - ""name"": ""Machine1"", - ""is_public"": True, - ""machines"": { - ""add"": [machine1, machine2, machine3], - }, - ""network_segments"": { - ""add"": [ - { - ""name"": ""Seg1"", - ""description"": ""string"", - ""enable_ipv4"": False - }, - { - ""name"": ""Seg2"", - ""description"": ""string"", - ""enable_ipv4"": False - }, - { - ""name"": ""Seg3"", - ""description"": ""string"", - ""enable_ipv4"": False - } - ] - } - - } - - params, r = run_api.ilibrary_add_new_island(params=params) - island_type = r.json()[""island_type""] - island_id = r.json()[""uuid""] - run_api.ilibrary_delete(uuid=island_id) - assert island_type == ""public"", ""The json is %s"" % r.json() - + UUID = 'doesnotexits' + clone_params, clone_r = run_api.library_clone_vm(UUID) + test_assert.status(clone_r, 404) + rjson = clone_r.json() + assert 
rjson['error'] == ""Clone: Machine not found"", ""|> The error message is {}"".format(rjson['error']) " -/ilibrary/rest/details/{UUID}/,fetching details of island library when island has one machine nic as Default and other machine nic as empty and island type is partial,"{ - ""name"": ""Machine1"", - ""is_public"": True, - ""machines"": { - ""add"": [machine1, machine2, machine3], - }, - - }","{ - ""response"" : success -}","def test_ilibrary_details_with_island_type_partial(run_api, library_add_three_vm): +/library/rest/clone/{{UUID}}/,cloning library when duplicate mac provided,"networks = [ + { + ""type"": ""bridge"", + ""model"": ""virtio"", + ""segment"": ""Default Public Segment"", + ""mac"": generate_mac_address() + } + ]","{ +""status"" : 400, +""message"" : ""Mac is already present"" +}","@pytest.mark.skip(""Return 400 but create a clone of vm"") +def test_library_clone_duplicate_mac(run_api): """""" - Detail of island_type when island has one machine nic as Default and other machine nic as empty and island type is partial + library clone with duplicate mac provided """""" - r1, r2, r3 = library_add_three_vm - machine1 = { - ""uuid"": r1.json()[""uuid""], - ""nics"": { - ""add"": [ - { - ""mac"": ""auto"", - ""type"": ""bridge"", - ""model"": networks[0].get(""model"", ""virtio""), - ""segment"": ""Default Public Segment"" - } - ], + networks = [ + { + ""type"": ""bridge"", + ""model"": ""virtio"", + ""segment"": ""Default Public Segment"", + ""mac"": generate_mac_address() } - - } - machine2 = { - ""uuid"": r2.json()[""uuid""], - + ] + params, r = run_api.library_add_new_vm(networks=networks) + rjson = r.json() + mac = rjson['hw']['networks'][-1]['mac'] + name = rjson['name'] + cl_name = rand_string() + clone = { + ""mac_list"": [mac,], + ""name"": cl_name, + ""description"": ""This is test description for %s"" % cl_name, } - machine3 = { - ""uuid"": r3.json()[""uuid""], - + UUID = rjson['UUID'] + clone_params, clone_r = run_api.library_clone_vm(UUID, 
clone) + test_assert.status(clone_r, 400) + clone_rjson = clone_r.json() + assert clone_rjson['error'] == ""Mac is already present in %s"" % name, ""|> The Error is {}"".format(clone_rjson) + run_api.library_delete(UUID) +" +/library/rest/clone/{{UUID}}/,cloning library when clone name is empty,"clone = { + ""mac_list"": [], + ""name"": """", + ""description"": ""This is test description for %s"", } - params = { - ""name"": ""Machine1"", - ""is_public"": True, - ""machines"": { - ""add"": [machine1, machine2, machine3], - }, - +","{ +""status"" : 400, +""message"" : ""Please provide clone name"" +}","def test_library_clone_with_empty_name(library_add_new_vm, run_api): + """""" + Empty name + """""" + clone = { + ""mac_list"": [], + ""name"": """", + ""description"": ""This is test description for %s"", } - params, r = run_api.ilibrary_add_new_island(params=params) - island_type = r.json()[""island_type""] - island_id = r.json()[""uuid""] - run_api.ilibrary_delete(uuid=island_id) - assert island_type == ""partial"", ""The json is %s"" % r.json() + params, r = library_add_new_vm + UUID = r['UUID'] + clone_params, clone_r = run_api.library_clone_vm(UUID, clone) + test_assert.status(clone_r, 400) + rjson = clone_r.json() + assert rjson['error'] == ""Please provide clone name"", ""|> The Error is {}"".format(rjson) " -/ilibrary/rest/details/{UUID}/,fetching details of island library when private machine is added to public island,"{ - ""name"": ""Machine1"", - ""is_public"": True, - ""machines"": { - ""add"": [machine1], - }, +/library/rest/clone/{{UUID}}/,cloning library when clone name contains #,"clone = { + ""mac_list"": [], + ""name"": cl_name, + ""description"": ""This is test description for %s"" % cl_name, }","{ - ""response"" : success -}","def test_ilibrary_details_with_public_island_with_private_machine(run_api): +""status"" : 400, +""message"" : ""Name cannot contain '/' or '#'"" +}","def test_library_clone_name_contains_hash(library_add_new_vm, run_api): 
"""""" - To check machine type when Private machine is added to public island + When clone name contains # """""" - params1, r1 = run_api.library_add_new_vm(networks=networks, is_public=False) - machine1 = { - ""uuid"": r1.json()[""uuid""], - ""is_public"": False, - ""nics"": { - ""add"": [ - { - ""mac"": ""auto"", - ""type"": ""bridge"", - ""model"": networks[0].get(""model"", ""virtio""), - ""segment"": ""Default Public Segment"" - }, - - ], - } + txt = rand_string() + random_index = random.randint(0, len(txt)) + newtxt = txt[:random_index] + random.choice(['#', '/']) + txt[random_index:] + cl_name = f""{newtxt}_cl"" + clone = { + ""mac_list"": [], + ""name"": cl_name, + ""description"": ""This is test description for %s"" % cl_name, } - params = { - ""name"": ""Machine1"", - ""is_public"": True, - ""machines"": { - ""add"": [machine1], - }, - } - params, r = run_api.ilibrary_add_new_island(params=params) - result = r.json()[""machines""] - island_id = r.json()[""uuid""] - run_api.ilibrary_delete(uuid=island_id) - run_api.library_delete(r1.json()[""uuid""]) - for machine in result: - if not machine[""is_public""]: - assert False, ""The machine is still private in public island and the json is %s"" % r.json() - + params, r = library_add_new_vm + UUID = r.get('UUID', 'doesnotexits') + clone_params, clone_r = run_api.library_clone_vm(UUID, clone) + test_assert.status(clone_r, 400) + rjson = clone_r.json() + assert rjson['error'] == ""Name cannot contain '/' or '#"", ""|> The error is {}"".format(rjson['error']) " -/ilibrary/rest/details/{UUID}/,fetching details of island library where island type is set to public,"{ - ""name"": ""Machine1"", - ""is_public"": True, - ""machines"": { - ""add"": [machine1, machine2, machine3], - }, -}","{ - ""response"" : success -}","def test_ilibrary_details_with_island_type_public(run_api, library_add_three_vm): - """""" - Detail of island_type when all machines have NIC as Default Public Segment - """""" - r1, r2, r3 = 
library_add_three_vm - machine1 = { - ""uuid"": r1.json()[""uuid""], - ""nics"": { - ""add"": [ - { - ""mac"": ""auto"", - ""type"": ""bridge"", - ""model"": networks[0].get(""model"", ""virtio""), - ""segment"": ""Default Public Segment"" - - } - ], - } +/library/rest/clone/{{UUID}}/,"cloning a library when provided with valid data. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ +""status"" : 200, +""response"" : Data of newly cloned machine +}","endpoint = ""lib_clone"" +PARAMETERS = [{""dest_obj"": OBJ_LIB}] - } - machine2 = { - ""uuid"": r2.json()[""uuid""], - ""nics"": { - ""add"": [ - { - ""mac"": ""auto"", - ""type"": ""bridge"", - ""model"": networks[0].get(""model"", ""virtio""), - ""segment"": ""Default Public Segment"" - } - ], - } - } - machine3 = { - ""uuid"": r3.json()[""uuid""], - ""nics"": { - ""add"": [ - { - ""mac"": ""auto"", - ""type"": ""bridge"", - ""model"": networks[0].get(""model"", ""virtio""), - ""segment"": ""Default Public Segment"" - } - ], - } +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_library_clone(library_clone_vm, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Cloning VM + """""" + template, r = library_clone_vm + result = r.json() + test_assert.status(template, result, ""library_clone"") + test_assert.status(r, 200) - } - params = { - ""name"": ""Machine1"", - ""is_public"": True, - ""machines"": { - ""add"": [machine1, machine2, machine3], - }, - } + # Adding non_admin check to Clone a Library Image created by different user + if run_api.user_type == USER_TYPE[""non_admin""]: + lib_id = custom_lib_admin_operations + param, r = run_api.library_clone_vm(lib_id) + test_assert.status(r, 403) - params, r = run_api.ilibrary_add_new_island(params=params) - island_type = 
r.json()[""island_type""] - island_id = r.json()[""uuid""] - run_api.ilibrary_delete(uuid=island_id) - assert island_type == ""public"", ""The json is %s"" % r.json() + if run_api.user_type == USER_TYPE[""manager""]: + # When the user is not part of the group that the manager manages + lib_id = custom_lib_admin_operations + param, r = run_api.library_clone_vm(lib_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False)) + + # When the user is part of the group that the manager manages + lib_id = custom_lib_non_admin_operations + param, r = run_api.library_clone_vm(lib_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True)) + param[""UUID""] = lib_id + clone_UUID = r.json()['UUID'] + run_api.library_delete(clone_UUID, param)" +/library/rest/ctypes/,getting the console type when requested without Authorization,,"{ +""status"" : 200, +""response"" : console type details displayed +}","def test_library_ctypes_without_authorization(anonymous_exec_api): + """""" + without authorization + """""" + r = anonymous_exec_api.library_console_types() + result = r.json() + test_assert.status(result, LIBRARY_CONSOLE_TYPE, ""library_ctypes"") + test_assert.status(r, 200) " -/ilibrary/rest/details/{UUID}/,fetching details of island library with invalid token,"{ - uid = ""invalid"" +/library/rest/delete/{UUID}/,deleting a library without Authorization,"{ +lib_id = 'wrong' }","{ ""status"" : 401, -""message"" : ""Invalid token"" -}","def test_ilibrary_details_with_invalid_token(invalid_exec_api): +""message"" : ""Authentication credentials were not provided"" +}","def test_lib_delete_without_authorization(anonymous_exec_api): """""" - Details of Ilibrary with invalid token + without authorization """""" - uid = ""invalid"" - r = invalid_exec_api.ilibrary_details(uuid=uid) - result = r.json() - test_assert.status(r, 401) - assert result['detail'] == ""Invalid token."" + lib_id = 'wrong' + ret = anonymous_exec_api.library_delete(lib_id) 
+ test_assert.status(ret, 401) + rjson = ret.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The Error is {}"".format(rjson['detail']) " -/ilibrary/rest/details/{UUID}/,fetching details of island library with no NIC and island type is private,"{ - ""name"": ""Machine1"", - ""is_public"": True, - ""machines"": { - ""add"": [machine1, machine2, machine3], - }, - - }","{ - ""response"" : success -}","def test_ilibrary_details_with_island_type_Zero_NIC(run_api, library_add_three_vm): +/library/rest/delete/{UUID}/,deleting a library when UUID exists and it has next revision/ deployment exists,,"{ +""status"" : 400, +""response"" : Bad Request +}","def test_lib_delete_with_deployment_exists(run_api, library_add_new_vm): """""" - Detail of island_type when all machines have assigned with No NIC's and island type is private + When UUID exists and it has next revision/ deployment exists """""" - r1, r2, r3 = library_add_three_vm - machine1 = { - ""uuid"": r1.json()[""uuid""], - - } - machine2 = { - ""uuid"": r2.json()[""uuid""], - - } - machine3 = { - ""uuid"": r3.json()[""uuid""], - - } - params = { - ""name"": ""Machine1"", - ""is_public"": True, - ""machines"": { - ""add"": [machine1, machine2, machine3], - }, - - } - - params, r = run_api.ilibrary_add_new_island(params=params) - island_type = r.json()[""island_type""] - island_id = r.json()[""uuid""] - run_api.ilibrary_delete(uuid=island_id) - assert island_type == ""private"", ""The json is %s"" % r.json() + params, r = library_add_new_vm + lib_id = r[""UUID""] + deploy = run_api.deploy_image(lib_id) + r = run_api.library_delete(lib_id, {}) + test_assert.status(r, 400) + deployjson = deploy.json() + run_api.deploy_image_delete(deployjson['UUID'], {}) " -/ilibrary/rest/details/{UUID}/,fetching details of island library without Authorization,"{ - uid = ""valid"" +/library/rest/delete/{UUID}/,deleting a library when requested with invalid token,"{ +lib_id = 'wrong' }","{ 
""status"" : 401, -""message"" : ""Authentication credentials were not provided."" -}","def test_ilibrary_details_without_authorization(anonymous_exec_api): +""message"" : ""Invalid token"" +}","def test_lib_delete_with_invalid_token(invalid_exec_api): """""" - Details of Ilibrary without authorization + without authorization """""" - uid = ""valid"" - r = anonymous_exec_api.ilibrary_details(uuid=uid) - result = r.json() - test_assert.status(r, 401) - assert result['detail'] == ""Authentication credentials were not provided."" + lib_id = 'wrong' + ret = invalid_exec_api.library_delete(lib_id) + test_assert.status(ret, 401) + rjson = ret.json() + assert rjson['detail'] == ""Invalid token."", ""|> The Error is {}"".format(rjson['detail']) " -/ilibrary/rest/details/{UUID}/,fetching details of private island library from public island,"{ - ""name"": ""Machine1"", - ""is_public"": True, - ""machines"": { - ""add"": [machine1], - }, - }","{ - ""response"" : success -}","def test_ilibrary_details_with_edit_public_island_to_private_island(skip_if_not_admin, run_api): +/library/rest/delete/{UUID}/,deleting a library when machine UUID does not exist,"{ + lib_id = ""invalid"" + +}","{ +""status"" : 404, +""message"" : Machine DoesNotExist +}","def test_lib_delete_with_invalid_UUID(run_api): """""" - To check machine type with private island + When machine UUID does not exist """""" - params1, r1 = run_api.library_add_new_vm(networks=networks) - machine1 = { - ""uuid"": r1.json()[""uuid""], - ""nics"": { - ""add"": [ - { - ""mac"": ""auto"", - ""type"": ""bridge"", - ""model"": networks[0].get(""model"", ""virtio""), - ""segment"": ""Default Public Segment"" - }, + lib_id = ""invalid"" + ret = run_api.library_delete(lib_id) + test_assert.status(ret, 404) +" +/library/rest/delete/{UUID}/,deleting a library by manager when provided with valid UUID,,,"endpoint = ""lib_delete"" - ], - } +PARAMETERS = [{""dest_obj"": OBJ_LIB}] - } - params = { - ""name"": ""Machine1"", - 
""is_public"": True, - ""machines"": { - ""add"": [machine1], - }, - } - params, r = run_api.ilibrary_add_new_island(params=params) - island_id = r.json()[""uuid""] - params, r = run_api.ilibrary_edit_island(uuid=island_id, params={""is_public"": False}) - res = r.json()[""machines""] - run_api.ilibrary_delete(uuid=island_id) - run_api.library_delete(r1.json()[""uuid""]) - for machine in res: - if machine[""is_public""]: - assert False, ""The json is %s"" % r.json() -" -/ilibrary/rest/details/{UUID}/,fetching details of public machines present in private island library,"{ - ""name"": ""Machine1"", - ""is_public"": False, - ""machines"": { - ""add"": [machine1], - }, - }","{ - ""response"" : success -}","def test_ilibrary_details_with_private_island_with_public_machine(run_api): + +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_lib_delete_manager(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): """""" - To check machine type with public island + Delete the Library by Manager """""" - params1, r1 = run_api.library_add_new_vm(networks=networks, is_public=True) - machine1 = { - ""uuid"": r1.json()[""uuid""], - ""nics"": { - ""add"": [ - { - ""mac"": ""auto"", - ""type"": ""bridge"", - ""model"": networks[0].get(""model"", ""virtio""), - ""segment"": ""Default Public Segment"" - }, - - ], - } + # When the user is not part of the group that the manager manages + lib_id = custom_lib_admin_operations + r = run_api.library_delete(lib_id, {}) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False)) - } - params = { - ""name"": ""Machine1"", - ""is_public"": False, - ""machines"": { - ""add"": [machine1], - }, - } - params, r = run_api.ilibrary_add_new_island(params=params) - result = r.json()[""machines""] - island_id = r.json()[""uuid""] - 
run_api.ilibrary_delete(uuid=island_id) - run_api.library_delete(r1.json()[""uuid""]) - for machine in result: - if machine[""is_public""]: - assert False, ""The machine is still public in private island and the json is %s"" % r.json() + # When the user is part of the group that the manager manages + lib_id = custom_lib_non_admin_operations + r = run_api.library_delete(lib_id, {}) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True)) +" +/library/rest/delete/{UUID}/,deleting a library by admin when provided with valid UUID,,"{ +""status"" : 204 +}","PARAMETERS = [{""dest_obj"": OBJ_LIB}] +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_lib_delete_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): + """""" + Deleting the Library by Admin + """""" + # Admin check for deleting the Library created by different user. + lib_id = custom_lib_non_admin_operations + r = run_api.library_delete(lib_id, {}) + test_assert.status(r, 204) " -/ilibrary/rest/edit/{UUID}/,add segment with same name that already exists,"params = { - ""name"": ""test_ilibrary"", - ""is_public"": True, - 'network_segments': { - 'add': [ - { - 'name': 'network1' - } - ] - }, - } -params1 = { - 'network_segments': { - 'add': [ - { - 'name': 'network1' - } - ] - }, - ""is_public"": True - }","{ - ""status"": 400, - ""message"": ""The segment name(s) is/are already taken for other island"" -}","def test_ilibrary_edit_add_segment_with_existing_name(run_api): +/library/rest/details/{UUID}/ ,getting library details when requested with invalid token,"{ + UUID = 'valid_UUID' +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_lib_details_with_invalid_token(invalid_exec_api): + """""" + invalid token + """""" + UUID = 'invalid' + r = invalid_exec_api.library_details(UUID, {}) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""|> The Error is 
{}"".format(rjson['detail']) +" +/library/rest/details/{UUID}/ ,getting library details when provided without Authorization,"{ + UUID = 'valid_UUID' +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}"," +def test_lib_details_without_authorization(anonymous_exec_api): + """""" + without authorization + """""" + UUID = 'invalid' + r = anonymous_exec_api.library_details(UUID, {}) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The Error is {}"".format(rjson['detail']) +" +/library/rest/details/{UUID}/ ,getting library details when provided with invalid UUID,"{ + UUID = 'invalid' +}","{ +""status"" : 200, +""message"" : ""Machine DoesNotExist"" +}","def test_lib_details_with_invalid_UUID(run_api): """""" - Editing an Island Library add segment with existing name + when provided invalid UUID """""" - params = { - ""name"": ""test_ilibrary"", - ""is_public"": True, - 'network_segments': { - 'add': [ - { - 'name': 'network1' - } - ] - }, - } - params, r = run_api.ilibrary_add_new_island(params=params) + UUID = 'invalid' + r = run_api.library_details(UUID, {}) + test_assert.status(r, 404) rjson = r.json() - uuid = rjson[""uuid""] + assert rjson['error'] == ""Machine Details: Machine not found"", ""|> The error message is %s"" % (rjson['error']) +" +/library/rest/details/{UUID}/ ,getting library details requested by an admin user,,"{ +""status"" : 200, +""response"" : Library details displayed +}","PARAMETERS = [{""dest_obj"": OBJ_LIB}] - params1 = { - 'network_segments': { - 'add': [ - { - 'name': 'network1' - } - ] - }, - ""is_public"": True - } - params, r = run_api.ilibrary_edit_island(uuid, params=params1) - test_assert.status(r, 400) - res = r.json() - assert res['error'] == ""The segment name(s) {'network1'} is/are already taken for the island"" - if 'error' not in rjson.keys(): - uuid = rjson[""uuid""] - run_api.ilibrary_delete(uuid, params) 
+@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_lib_details_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): + """""" + Details of the Library by Admin + """""" + # Admin check for fetching details of the Library created by different user. + lib_id = custom_lib_non_admin_operations + r = run_api.library_details(lib_id, {}) + test_assert.status(r, 200) " -/ilibrary/rest/edit/{UUID}/,adding a Segment name which already exists but was deleted,"params = { - ""name"": ""test_ilibrary"", - ""is_public"": True, - 'network_segments': { - 'add': [ - { - 'name': 'network1' - } - ] - } +/library/rest/details/{UUID}/ ,getting library details requested by a non-admin user,,"{ +""status"" : 403 +}"," +PARAMETERS = [{""dest_obj"": OBJ_LIB}] - } -params1 = { - 'network_segments': { - 'add': [ - { - 'name': 'network1' - } - ], - 'delete': [ - { - 'uuid': seg_id, - } - ] - }, - ""is_public"": True - }","{ - ""status"": 201, - ""response"" : success -}","def test_ilibrary_edit_add_and_delete_segment_same_name(run_api): +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_lib_details_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): """""" - Editing an Island Library Add a Segment name which already exists but is being deleted + Details of the Library by non-Admin """""" - params = { - ""name"": ""test_ilibrary"", - ""is_public"": True, - 'network_segments': { - 'add': [ - { - 'name': 'network1' - } - ] - } + # Non-admin check for fetching details of the Library created by different user. 
+ lib_id = custom_lib_admin_operations + r = run_api.library_details(lib_id, {}) + test_assert.status(r, 403) - } - params, r = run_api.ilibrary_add_new_island(params=params) +" +/library/rest/details/{UUID}/ ,getting library details ,,"{ +""status"" : 200, +""response"" : Library details displayed +}","def test_lib_details(library_details): + """""" + Getting the Library details + """""" + x, r = library_details + test_assert.status(r, 200) +" +/library/rest/dformattypes/,getting the details of DiskFormat Type without Authorization,,"{ +""status"" : 200, +""message"" : DiskFormat type list +}","def test_library_dformattypes_without_authorization(anonymous_exec_api): + """""" + Getting the list of disk format types + """""" + r = anonymous_exec_api.library_disk_format_type() + result = r.json() + test_assert.status(result, LIBRARY_DISK_FORMAT_TYPE, ""library_dformattypes"") + test_assert.status(r, 200) +" +/library/rest/dformattypes/,getting the details of DiskFormat Type when requested with invalid token,,"{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_library_dformattypes_with_invalid_token(invalid_exec_api): + """""" + Getting the list of disk format types + """""" + r = invalid_exec_api.library_disk_format_type() + test_assert.status(r, 401) rjson = r.json() - uuid = rjson[""uuid""] - segment_list = rjson['network_segments'] - - for segment in segment_list: - if segment['name'] == 'network1': - seg_id = segment['uuid'] - - params1 = { - 'network_segments': { - 'add': [ - { - 'name': 'network1' - } - ], - 'delete': [ - { - 'uuid': seg_id, - } - ] - }, - ""is_public"": True - } - params, r = run_api.ilibrary_edit_island(uuid, params=params1) - test_assert.status(r, 201) - # res = r.json() - if 'error' not in rjson.keys(): - uuid = rjson[""uuid""] - run_api.ilibrary_delete(uuid, params) - + assert rjson['detail'] == ""Invalid token."", ""|> The Error is {}"".format(rjson['detail']) " -/ilibrary/rest/edit/{UUID}/,adding and updating the segment 
giving the same name that already exists,"params = { - ""name"": ""test_ilibrary"", - ""is_public"": True, - 'network_segments': { - 'add': [ - { - 'name': 'network1' - } - ] - } - } -params1 = { - 'network_segments': { - 'add': [ - { - 'name': 'network2' - } - ], - 'update': [ - { - 'uuid': seg_id, - 'name': 'network2' - } - ] - }, - ""is_public"": True - }","{ - ""status"": 400, - ""message"": ""Segment name should be unique for an island"" -}","def test_ilibrary_edit_add_and_update_segment_same_name(run_api): +/library/rest/dformattypes/,getting the details of DiskFormat Type,,"{ +""status"" : 200, +""message"" : DiskFormat type list +}","def test_library_dformattypes(run_api): + """""" + Getting the list of disk format types + """""" + r = run_api.library_disk_format_type() + result = r.json() + test_assert.status(result, LIBRARY_DISK_FORMAT_TYPE, ""library_dformattypes"") + test_assert.status(r, 200) +" +/library/rest/dtypes/,getting DiskBus Type list when requested without authorization,,"{ +""status"" : 200, +""message"" : DiskBus type list +}","def test_library_dtypes_without_authorization(anonymous_exec_api): + """""" + Getting the list of disk type without authorization + """""" + r = anonymous_exec_api.library_disk_type() + result = r.json() + test_assert.status(result, LIBRARY_DISK_TYPE, ""library_boottypes"") + test_assert.status(r, 200) +" +/library/rest/dtypes/,getting DiskBus Type list when requested with invalid token,,"{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_library_dtypes_with_invalid_token(invalid_exec_api): """""" - Editing an Island Library add and update segment with same name + Getting the list of disk type """""" - params = { - ""name"": ""test_ilibrary"", - ""is_public"": True, - 'network_segments': { - 'add': [ - { - 'name': 'network1' - } - ] - } - - } - params, r = run_api.ilibrary_add_new_island(params=params) - rjson = r.json() - uuid = rjson[""uuid""] - segment_list = rjson['network_segments'] - for 
segment in segment_list: - if segment['name'] == 'network1': - seg_id = segment['uuid'] - params1 = { - 'network_segments': { - 'add': [ - { - 'name': 'network2' - } - ], - 'update': [ - { - 'uuid': seg_id, - 'name': 'network2' - } - ] - }, - ""is_public"": True - } - params, r = run_api.ilibrary_edit_island(uuid, params=params1) - test_assert.status(r, 400) - res = r.json() - assert res['error'] == ""Segment name should be unique for an island"" - if 'error' not in rjson.keys(): - uuid = rjson[""uuid""] - run_api.ilibrary_delete(uuid, params) + r = invalid_exec_api.library_disk_type() + result = r.json() + test_assert.status(r, 401) + assert result['detail'] == ""Invalid token."", ""|> The Error is {}"".format(result['detail']) " -/ilibrary/rest/edit/{UUID}/,deleting an island library segment which is connected to NIC,"params = { - ""name"": ""test_ilibrary"", - ""machines"": { - ""add"": [ - { - ""uuid"": r1.json()[""uuid""] - } - ] - }, - 'network_segments': { - 'add': [ - { - 'name': 'network1' - } - ] - }, - ""is_public"": False - } -params3 = { - ""machines"": { - ""update"": [ - { - ""uuid"": machine_uuid, - ""nics"": { - ""add"": [ - { - ""model"": ""virtio"", - 'segment': 'network1' - } - ] - } - } - ] - }, - ""is_public"": False - }","{ - ""status"": 201, - ""response"" : success -}","def test_ilibrary_edit_delete_segment_connected_to_nic(run_api): +/library/rest/dtypes/,getting DiskBus Type list When Requested,,"{ +""status"" : 200, +""message"" : DiskBus type list +}","def test_library_dtypes(run_api): """""" - Editing an Island Library by Deleteing a Segment connected to NIC and check NICs final connection + Getting the list of disk type """""" - networks = template_networks() - if run_api.arch_type == ""aarch64"": - params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") - else: - params1, r1 = run_api.library_add_new_vm(networks=networks) - - params = { - ""name"": ""test_ilibrary"", - 
""machines"": { - ""add"": [ - { - ""uuid"": r1.json()[""uuid""] - } - ] - }, - 'network_segments': { - 'add': [ - { - 'name': 'network1' - } - ] - }, - ""is_public"": False - } - - params, r3 = run_api.ilibrary_add_new_island(params=params) - rjson3 = r3.json() - uuid = rjson3['uuid'] - segment_list = rjson3['network_segments'] - - for segment in segment_list: - if segment['name'] == 'network1': - seg_uuid = segment['uuid'] - - machine_uuid = rjson3['machines'][0]['uuid'] - params3 = { - ""machines"": { - ""update"": [ - { - ""uuid"": machine_uuid, - ""nics"": { - ""add"": [ - { - ""model"": ""virtio"", - 'segment': 'network1' - } - ] - } - } - ] - }, - ""is_public"": False - } - params, r = run_api.ilibrary_edit_island(uuid, params=params3) - nics = r.json()['machines'][0]['hw']['networks'] - for nic in nics: - if nic['segment'] == 'network1': - nic_id = nic['id'] + r = run_api.library_disk_type() + result = r.json() + test_assert.status(result, LIBRARY_DISK_TYPE, ""library_boottypes"") + test_assert.status(r, 200) +" +/library/rest/edit/{UUID}/,updation of serialport in a library,"serialports = [{ + ""source_type"": ""pty"", + ""target_type"": ""isa-serial"", + }] - params = { - 'network_segments': { - 'delete': [ - { - 'uuid': seg_uuid - } - ] - }, - ""is_public"": False - } - params, r = run_api.ilibrary_edit_island(uuid, params=params) - test_assert.status(r, 201) - rjson = r.json() - nics = rjson['machines'][0]['hw']['networks'] - assert isinstance(nic_id, int) - for nic in nics: - if nic['id'] == nic_id: - assert nic['segment'] is None +updated_serialports = [{ + ""source_type"": ""pty"", + ""target_type"": ""pci-serial"", + }] - if 'error' not in rjson3.keys(): - run_api.ilibrary_delete(uuid, params3) - run_api.library_delete(r1.json()[""uuid""]) -" -/ilibrary/rest/edit/{UUID}/,deleting Island with UUID of machine which is part of other Island,"params2 = { - ""name"": ""test_ilibrary"", - ""machines"": { - ""add"": [ - { - ""uuid"": rjson1[""uuid""] - } - 
] - }, - ""is_public"": False - } -params3 = { - ""name"": ""test_ilibrary"", - ""is_public"": True - } -params4 = { - ""machines"": { - ""delete"": [ - { - ""uuid"": machine_uuid - } - ] - }, - ""is_public"": False - }","{ - ""status"": 404, - ""response"": forbidden -}","def test_ilibrary_edit_delete_machine_of_another_island(run_api, library_add_new_vm): +","{ +""status"" : 201 +}","def test_library_edit_serialport(run_api): """""" - Editing an Island Library by deleting uuid of machine which is part of another island + update serialport """""" - - params1, rjson1 = library_add_new_vm - params2 = { - ""name"": ""test_ilibrary"", - ""machines"": { - ""add"": [ - { - ""uuid"": rjson1[""uuid""] - } - ] - }, - ""is_public"": False + serialports = [{ + ""source_type"": ""pty"", + ""target_type"": ""isa-serial"", + }] + p, r = run_api.library_add_new_vm(serialports=serialports) + lib_id = r.json()['UUID'] + updated_serialports = [{ + ""source_type"": ""pty"", + ""target_type"": ""pci-serial"", + }] + params = {'hw': {'serialports': updated_serialports}} + res = run_api.library_edit(lib_id, params) + test_assert.status(res, 201) + rjson = res.json() + for serialport in rjson['hw']['serialports']: + assert serialport['source_type'] == 'pty', ""|> Json %s"" % rjson + assert serialport['target_type'] == 'pci-serial', ""|> Json %s"" % rjson + run_api.library_delete(lib_id) +" +/library/rest/edit/{UUID}/,updation of network in a library with invalid mac,"networks = [{ + ""type"": ""bridge"", + ""model"": ""virtio"", + ""segment"": ""Default Public Segment"", } - params2, r2 = run_api.ilibrary_add_new_island(params=params2) - rjson2 = r2.json() - uuid2 = rjson2['uuid'] - machine_uuid = rjson2['machines'][0]['uuid'] + ] - params3 = { - ""name"": ""test_ilibrary"", - ""is_public"": True - } - params3, r3 = run_api.ilibrary_add_new_island(params=params3) - rjson3 = r3.json() - uuid3 = rjson3[""uuid""] +update_network = [{ + ""mac"": ""invalid"" + }] - params4 = { - 
""machines"": { - ""delete"": [ - { - ""uuid"": machine_uuid - } - ] - }, - ""is_public"": False - } - params, r = run_api.ilibrary_edit_island(uuid3, params=params4) - test_assert.status(r, 404) - if 'error' not in rjson2.keys(): - run_api.ilibrary_delete(uuid2, params2) - if 'error' not in rjson3.keys(): - run_api.ilibrary_delete(uuid3, params3) -" -/ilibrary/rest/edit/{UUID}/,deleting NIC which is a part of some other machine,"params3 = { - ""name"": ""test_ilibrary"", - ""machines"": { - ""add"": [ - { - ""uuid"": r1.json()[""uuid""] - } - ] - }, - ""is_public"": False +","{ +""status"" : 400, +""message"" : ""MAC address is not correct"" +}","def test_library_edit_network_invalid_mac(run_api): + """""" + update network with invalid mac + """""" + networks = [{ + ""type"": ""bridge"", + ""model"": ""virtio"", + ""segment"": ""Default Public Segment"", } -params4 = { - ""machines"": { - ""update"": [ - { - ""uuid"": machine_uuid, - ""nics"": { - ""delete"": [ - { - 'id': second_nic - } - ] - } - } - ] - }, - ""is_public"": False - }","{ - ""status"": 400, - ""message"": ""The NIC with the given id isn't part of the provided machine"" -}","def test_ilibrary_edit_delete_nic_of_other_machine(run_api): + ] + params, r = run_api.library_add_new_vm(networks=networks) + update_netork = [{ + ""mac"": ""invalid"" + }] + params = {'hw': {'networks': update_netork}} + lib_id = r.json()[""UUID""] + res = run_api.library_edit(lib_id, params) + test_assert.status(res, 400) + rjson = res.json() + assert rjson['error'] == ""MAC address `invalid` is not correct"", ""|> Json %s"" % rjson + run_api.library_delete(lib_id, {}) +" +/library/rest/edit/{UUID}/,updation of library without Authorization,"{ + lib_id = ""doesnotexits"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_library_edit_without_authorization(anonymous_exec_api): """""" - Editing an Island Library by Deleting NIC which is not part of this machine but some 
other machine + without authorization """""" - networks = template_networks() - if run_api.arch_type == ""aarch64"": - params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") - params2, r2 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") - else: - params1, r1 = run_api.library_add_new_vm(networks=networks) - params2, r2 = run_api.library_add_new_vm(networks=networks) + lib_id = ""doesnotexits"" + r = anonymous_exec_api.library_edit(lib_id, {""hw"": {}}) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) +" +/library/rest/edit/{UUID}/,updation of library with network type host and segment Default Public Segment,"networks = [{ + ""type"": ""bridge"", + ""model"": ""virtio"", + ""segment"": ""Default Public Segment"", + } + ] - # first_nic = r1.json()['hw']['networks'][0]['id'] - second_nic = r2.json()['hw']['networks'][0]['id'] - params3 = { - ""name"": ""test_ilibrary"", - ""machines"": { - ""add"": [ - { - ""uuid"": r1.json()[""uuid""] - } - ] - }, - ""is_public"": False - } - params3, r3 = run_api.ilibrary_add_new_island(params=params3) - rjson3 = r3.json() - uuid = rjson3['uuid'] - machine_uuid = rjson3['machines'][0]['uuid'] - params3 = { - ""machines"": { - ""update"": [ - { - ""uuid"": machine_uuid, - ""nics"": { - ""delete"": [ - { - 'id': second_nic - } - ] - } - } - ] - }, - ""is_public"": False +update_netork = [{ + ""type"": ""host"", + ""model"": ""virtio"", + ""segment"": ""Default Public Segment"", + }] +","{ +""status"" : 400, +""message"" :""Network type `host` can only be connected to `HostOnly Segment`, your provided input for segment is `Default Public Segment`."" +}","def test_library_edit_with_network_type_host_segment_default_public(run_api): + """""" + Library update with network type host and segment Default Public 
Segment + """""" + networks = [{ + ""type"": ""bridge"", + ""model"": ""virtio"", + ""segment"": ""Default Public Segment"", } - params, r = run_api.ilibrary_edit_island(uuid, params=params4) - test_assert.status(r, 400) - res = r.json() - assert res['error'] == ""The NIC with the given id '"" + str(second_nic) + ""' isn't part of the provided machine"" - if 'error' not in rjson3.keys(): - run_api.ilibrary_delete(uuid, params) - run_api.library_delete(r1.json()[""uuid""]) - run_api.library_delete(r2.json()[""uuid""]) + ] + params, r = run_api.library_add_new_vm(networks=networks) + update_netork = [{ + ""type"": ""host"", + ""model"": ""virtio"", + ""segment"": ""Default Public Segment"", + }] + params = {'hw': {'networks': update_netork}} + lib_id = r.json()[""UUID""] + res = run_api.library_edit(lib_id, params) + test_assert.status(res, 400) + rjson = res.json() + assert rjson['error'] == ""Network type `host` can only be connected to `HostOnly Segment`, your provided input for segment is `Default Public Segment`."", ""|> Ther error is %s"" % rjson + run_api.library_delete(lib_id, {}) + " -/ilibrary/rest/edit/{UUID}/,editing an ilibrary with no machines operations,"{ -uuid = 'valid-uuid' -params = { - ""name"": ""test_ilibrary"", - ""is_public"": True, - ""machines"": {}, - ""is_public"": True +/library/rest/edit/{UUID}/,updation of library with network type changed to host and segment is set to Default Public Segment,"networks = [{ + ""type"": ""host"", + ""model"": ""virtio"", + ""segment"": ""HostOnly Segment"", + } + ] -} -}","{ -""status"" : 201, -""response"" : success -}","def test_ilibrary_edit_with_no_machine_operation(run_api): +update_netork = [{ + ""type"": ""bridge"", + ""model"": ""virtio"", + ""segment"": ""HostOnly Segment"", + }] +","{ +""status"" : 400, +""message"" : ""Network type `bridge` can only be connected to `Default Public Segment`, your provided input for segment is `HostOnly Segment`."" +}","def 
test_library_edit_with_network_type_bridge_segment_HostOnly(run_api): """""" - Editing an Island Library with no machine operation + Library update with network type host and segment Default Public Segment """""" - params = { - ""name"": ""test_ilibrary"", - ""is_public"": True - } - params, r = run_api.ilibrary_add_new_island(params=params) - rjson = r.json() - uuid = rjson[""uuid""] - params = { - 'machines': {}, - ""is_public"": True + networks = [{ + ""type"": ""host"", + ""model"": ""virtio"", + ""segment"": ""HostOnly Segment"", } - params, r = run_api.ilibrary_edit_island(uuid, params=params) - test_assert.status(r, 201) - if 'error' not in rjson.keys(): - uuid = rjson[""uuid""] - run_api.ilibrary_delete(uuid, params) -" -/ilibrary/rest/edit/{UUID}/,editing an ilibrary with no name,"{ - ""name"": ""test_ilibrary"", - ""is_public"": True, - ""description"": ""testing"", - ""is_public"": True - }","{ -""status"" : 201, -""response"" : success -}","def test_ilibrary_edit_with_no_name(run_api): + ] + params, r = run_api.library_add_new_vm(networks=networks) + update_netork = [{ + ""type"": ""bridge"", + ""model"": ""virtio"", + ""segment"": ""HostOnly Segment"", + }] + params = {'hw': {'networks': update_netork}} + lib_id = r.json()[""UUID""] + res = run_api.library_edit(lib_id, params) + test_assert.status(res, 400) + rjson = res.json() + assert rjson['error'] == ""Network type `bridge` can only be connected to `Default Public Segment`, your provided input for segment is `HostOnly Segment`."", ""|> Ther error is %s"" % rjson + run_api.library_delete(lib_id, {}) + + +" +/library/rest/edit/{UUID}/,updation of library when UUID exists and it has next revision,,"{ +""status"" : 403, +""message"" : ""Next Revision Exists , Edit Permission Not Allowed"" +}","def test_library_edit_with_revision_exists(library_add_new_vm, run_api): """""" - Editing an Island Library with no name + When machine with UUID Does Not Exist """""" - params = { - ""name"": ""test_ilibrary"", 
- ""is_public"": True - } - params, r = run_api.ilibrary_add_new_island(params=params) - rjson = r.json() - uuid = rjson[""uuid""] - params = { - ""description"": ""testing"", - ""is_public"": True - } - params, r = run_api.ilibrary_edit_island(uuid, params=params) - test_assert.status(r, 201) - if 'error' not in rjson.keys(): - uuid = rjson[""uuid""] - run_api.ilibrary_delete(uuid, params) + parmas, r = library_add_new_vm + lib_id = r['UUID'] + res = run_api.deploy_image(lib_id=lib_id) + deploy_id = res.json()['UUID'] + revision = run_api.deploy_snapshot(deploy_id=deploy_id) + edit_r = run_api.library_edit(lib_id, {""hw"": {}}) + edit_rjson = edit_r.json() + test_assert.status(edit_r, 403) + assert edit_rjson['result'] == ""Next_revision Exists: Edit permission not allowed"", "">| The error message is %s"" % (edit_rjson['result']) + run_api.deploy_image_delete(deploy_id, {}) + revision_id = revision.json()['snapshotted_machine_UUID'] + run_api.library_delete(revision_id) " -/ilibrary/rest/edit/{UUID}/,editing an ilibrary without token,"{ -uuid = 'valid-uuid' -params = { - ""name"": ""test_ilibrary"", - ""is_public"": True -} +/library/rest/edit/{UUID}/,updation of library when requested with invalid token,"{ + lib_id = ""doesnotexits"" }","{ ""status"" : 401, -""message"" : ""Authentication credentials were not provided."" -}","def test_ilibrary_edit_without_authorization(anonymous_exec_api): +""message"" : ""Invalid token"" +}","def test_library_edit_with_invalid_token(invalid_exec_api): """""" - Editing an Island Library without authorization + with invalid token """""" - uuid = 'valid-uuid' - params = { - ""name"": ""test_ilibrary"", - ""is_public"": True - } - params, r = anonymous_exec_api.ilibrary_edit_island(uuid, params=params) + lib_id = ""doesnotexits"" + r = invalid_exec_api.library_edit(lib_id, {""hw"": {}}) test_assert.status(r, 401) - res = r.json() - assert res['detail'] == ""Authentication credentials were not provided."" + rjson = r.json() + 
assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) " -/ilibrary/rest/edit/{UUID}/,editing an Island Library by deleting segment which is part of another island library,"params1 = { - ""name"": ""test_ilibrary"", - ""is_public"": True - } -params2 = { - ""name"": ""test_ilibrary"", - ""is_public"": True +/library/rest/edit/{UUID}/,updation of disks in a library using the size param,"disks = {""update"": [ + { + ""UUID"": r['hw']['disks'][0]['UUID'], + ""port"": ""sdz"", + ""type"": r['hw']['disks'][0]['type'], + ""size"": 5 + } + ] } -params3 = { - 'network_segments': { - 'delete': [ - { - 'uuid': network_segment2['uuid'] - } - ] - - }, - ""is_public"": True - }","{ - ""status"": 400, - ""message"": ""The Segment uuid doesn't exist in the island"" -}","def test_ilibrary_edit_delete_segments_of_other_island(run_api): +","{ +""status"" : 400, +""message"" : """"Modifying the disk size during Library Edit is not permitted"", +}"," +def test_library_edit_disk_size_param(library_add_new_vm, run_api): """""" - Editing an Island Library by deleting segment which is part of another island library + Update disk with 'size' param """""" - params1 = { - ""name"": ""test_ilibrary"", - ""is_public"": True - } - params1, r1 = run_api.ilibrary_add_new_island(params=params1) - rjson1 = r1.json() - uuid1 = rjson1[""uuid""] - - params2 = { - ""name"": ""test_ilibrary"", - ""is_public"": True - } - params2, r2 = run_api.ilibrary_add_new_island(params=params2) - rjson2 = r2.json() - uuid2 = rjson2[""uuid""] - - network_segment2 = rjson2['network_segments'][0] - params3 = { - 'network_segments': { - 'delete': [ - { - 'uuid': network_segment2['uuid'] - } - ] - - }, - ""is_public"": True + p, r = library_add_new_vm + lib_id = r['UUID'] + disks = {""update"": [ + { + ""UUID"": r['hw']['disks'][0]['UUID'], + ""port"": ""sdz"", + ""type"": r['hw']['disks'][0]['type'], + ""size"": 5 + } + ] } - params3, r3 = run_api.ilibrary_edit_island(uuid1, 
params=params3) - test_assert.status(r3, 400) - res = r3.json() - assert res['error'] == ""The Segment uuid ["" + network_segment2['uuid'] + ""] doesn't exist in the island"" - if 'error' not in rjson1.keys(): - run_api.ilibrary_delete(uuid1, params1) - if 'error' not in rjson2.keys(): - run_api.ilibrary_delete(uuid2, params2) + params = {""hw"": {""disks"": disks}} + r = run_api.library_edit(lib_id, params) + test_assert.status(r, 400) + rjson = r.json() + rjson['error'] == ""Modifying the disk size during Library Edit is not permitted"", ""|> json %s"" % rjson " -/ilibrary/rest/edit/{UUID}/,editing an island library segment,"{ - ""name"": ""test_ilibrary"", - ""is_public"": True, - 'network_segments': { - 'add': [ - { - 'name': 'seg1' - }, - { - 'name': 'seg2' - }, - { - 'name': 'seg3' - }, - ] - }, - 'network_segments': { - 'update': [ - { - 'uuid': seg1_id, - 'name': 'seg3' - }, - { - 'uuid': seg2_id, - 'name': 'seg1' - }, - { - 'uuid': seg3_id, - 'name': 'seg2' - } - ] - }, - ""is_public"": True +/library/rest/edit/{UUID}/,updation of disks in a library,"disks = {""update"": [ + { + ""UUID"": r['hw']['disks'][0]['UUID'], + ""port"": ""sdz"", + ""type"": r['hw']['disks'][0]['type'] + } + ] }","{ -""status"" : 201, -""response"" : success -}","def test_ilibrary_edit_segments(run_api): +""status"" : 201 +}","def test_library_edit_update_disk(library_add_new_vm, run_api): """""" - Editing an Island Library segments + Update disk """""" - params = { - ""name"": ""test_ilibrary"", - ""is_public"": True, - 'network_segments': { - 'add': [ - { - 'name': 'seg1' - }, - { - 'name': 'seg2' - }, - { - 'name': 'seg3' - }, - ] - }, - } - params, r = run_api.ilibrary_add_new_island(params=params) - rjson = r.json() - uuid = rjson[""uuid""] - segment_list = rjson['network_segments'] - - for segment in segment_list: - if segment['name'] == 'seg1': - seg1_id = segment['uuid'] - elif segment['name'] == 'seg2': - seg2_id = segment['uuid'] - elif segment['name'] == 'seg3': - 
seg3_id = segment['uuid'] - - params = { - 'network_segments': { - 'update': [ - { - 'uuid': seg1_id, - 'name': 'seg3' - }, - { - 'uuid': seg2_id, - 'name': 'seg1' - }, - { - 'uuid': seg3_id, - 'name': 'seg2' - } - ] - }, - ""is_public"": True + p, r = library_add_new_vm + lib_id = r['UUID'] + disks = {""update"": [ + { + ""UUID"": r['hw']['disks'][0]['UUID'], + ""port"": ""sdz"", + ""type"": r['hw']['disks'][0]['type'] + } + ] } - params, r = run_api.ilibrary_edit_island(uuid, params=params) + params = {""hw"": {""disks"": disks}} + r = run_api.library_edit(lib_id, params) test_assert.status(r, 201) - res = r.json() - segment_list = res['network_segments'] - - for segment in segment_list: - if segment['uuid'] == seg1_id: - assert segment['name'] == 'seg3' - elif segment['uuid'] == seg2_id: - assert segment['name'] == 'seg1' - elif segment['uuid'] == seg3_id: - assert segment['name'] == 'seg2' + rjson = r.json() + assert rjson['hw']['disks'][0]['port'] == 'sdz', ""|> json %s"" % rjson +" +/library/rest/edit/{UUID}/,updation of disk when invalid UUID provided,,"{ +""status"" : 404, +""message"" : ""Disk with UUID does not exist"" +}","def test_library_edit_invalid_disk_UUID(library_add_new_vm, run_api): + """""" + update disk with invalid UUID + """""" + p, r = library_add_new_vm + lib_id = r['UUID'] + disk_UUID = str(UUID.UUID4()) + # disk_UUID = 'invalid' it gives {'hw': {'disks': {'update': [{'UUID': ['Must be a valid UUID.']}]}}} + disks = {""update"": [ + { + ""UUID"": disk_UUID, + ""port"": ""sdz"", + ""type"": r['hw']['disks'][0]['type'] + } + ] + } + params = {""hw"": {""disks"": disks}} + res = run_api.library_edit(lib_id, params) + test_assert.status(res, 404) + rjson = res.json() + assert rjson['error'] == f""Disk with UUID {disk_UUID} does not exist"", ""|> json %s"" % rjson +" +/library/rest/edit/{UUID}/,updation of cdrom in a library,"cdrom = [ + { + ""type"": ""ide"", + ""is_boot"": False + } + ] - if 'error' not in rjson.keys(): - uuid = 
rjson[""uuid""] - run_api.ilibrary_delete(uuid, params) +updated_cdrom = [ + { + ""type"": ""sata"", + ""is_boot"": False + } + ] +","{ +""status"" : 201 +}","def test_library_edit_cdrom(run_api): + """""" + update cdrom with valid data + """""" + cdrom = [ + { + ""type"": ""ide"", + ""is_boot"": False + } + ] + p, r = run_api.library_add_new_vm(cdrom=cdrom) + lib_id = r.json()['UUID'] + updated_cdrom = [ + { + ""type"": ""sata"", + ""is_boot"": False + } + ] + params = {'hw': {'cdrom': updated_cdrom}} + res = run_api.library_edit(lib_id, params) + test_assert.status(res, 201) + rjson = res.json() + for cdrom in rjson['hw']['cdrom']: + assert cdrom['type'] == 'sata', ""|> Json %s"" % rjson + assert cdrom['is_boot'] is False, ""|> Json %s"" % rjson + run_api.library_delete(lib_id) " -/ilibrary/rest/edit/{UUID}/,"editing an Island library successfully. Check the user type before performing the operation, only admin user type have the permission to perform such operations. -","{""name"": ""add"", ""is_public"": False}","{ - ""status"": 403 -}","endpoint = ""ilibrary_edit"" -PARAMETERS = [{""dest_obj"": OBJ_ISL}] +/library/rest/edit/{UUID}/,updation of arch param of library,,"{ +""status"" : 400, +""message"" : ""Architecture of a Machine cannot be modified."" +}","def test_library_edit_arch(library_add_new_vm, run_api): + """""" + Edit the architecture of vm + """""" + p, r = library_add_new_vm + lib_id = r['UUID'] + params = {'hw': {'arch': 'aarch64'}} + res = run_api.library_edit(lib_id, params) + test_assert.status(res, 400) + rjson = res.json() + assert rjson['error'] == ""Architecture of a Machine cannot be modified."", ""|> The error is %s"" % rjson +" +/library/rest/edit/{UUID}/,"edition of details when UUID exists and it doesn't have next revision. Check the user type before performing the operation. 
+",,"{ +""status"" : 201, +""response"" : Details updated +}"," +endpoint = ""lib_edit"" +PARAMETERS = [{""dest_obj"": OBJ_LIB}] -@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) -@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) -def test_ilibrary_edit(run_api, ilibrary_edit_island, custom_ilib_admin_operations, custom_ilib_non_admin_operations): + +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_library_edit(run_api, custom_lib_admin_operations, custom_lib_non_admin_operations): """""" - Editing an Island Library + Editing the details of VM """""" - params, r = ilibrary_edit_island + if run_api.arch_type == ""aarch64"": + params, r = run_api.library_add_new_vm(arch=""aarch64"", type=""virtio"", port=""vda"") + else: + params, r = run_api.library_add_new_vm() + rjson = r.json() + lib_id = r.json()[""UUID""] + if run_api.arch_type == ""aarch64"": + dist_add_param = {""type"": ""virtio"", ""port"": ""vdz""} + else: + dist_add_param = {} + r = run_api.library_edit(lib_id, params={""hw"": {""disks"": template_library_edit_disk_add(**dist_add_param)}}) + test_assert.status(params, rjson, ""library_edit"") test_assert.status(r, 201) - # Adding non_admin check of Editing an Island Library created by different user + if 'error' not in rjson.keys(): + UUID = rjson[""UUID""] + run_api.library_delete(UUID, params) + + # Adding non_admin check of Editing a Library Image created by different user if run_api.user_type == USER_TYPE[""non_admin""]: - lib_id = custom_ilib_admin_operations - params = {""name"": ""add"", ""is_public"": False} - params, r = run_api.ilibrary_edit_island(lib_id, params=params) + lib_id = custom_lib_admin_operations + r = run_api.library_edit(lib_id, {""hw"": {}}) test_assert.status(r, 403) - # Adding a Manager check of Editing an Island 
Library created by a user of his/her group + # Adding a Manager check of Editing a deployment info created by a user of his/her group # and also when it's not the case if run_api.user_type == USER_TYPE[""manager""]: # When the user is not part of the group that the manager manages - lib_id = custom_ilib_admin_operations - params = {""name"": ""add"", ""is_public"": False} - params, r = run_api.ilibrary_edit_island(lib_id, params=params) + lib_id = custom_lib_admin_operations + r = run_api.library_edit(lib_id, {""hw"": {}}) test_assert.status(r, manager_rights_response(endpoint, manages_user=False)) # When the user is part of the group that the manager manages - lib_id = custom_ilib_non_admin_operations - params = {""name"": ""add"", ""is_public"": False} - params, r = run_api.ilibrary_edit_island(lib_id, params=params) + lib_id = custom_lib_non_admin_operations + r = run_api.library_edit(lib_id, {""hw"": {}}) test_assert.status(r, manager_rights_response(endpoint, manages_user=True)) " -/ilibrary/rest/edit/{UUID}/,editing an Island library when invalid UUID is provided,"{ -uuid = 'invalid-uuid' -params = { - ""name"": ""test_ilibrary"", - ""is_public"": True -} -}","{ - ""status"": 404, -}","def test_ilibrary_edit_invalid_uuid(run_api): +​/library​/rest​/hvmtypes​/,fetching the hypervisor type when requested without Authorization,,"{ +""status"" : 200, +""response"" : list of hypervisor type +}","def test_library_hvmtypes_without_authorization(anonymous_exec_api): """""" - Editing an Island Library with invalid uuid + without authorization """""" - uuid = 'invalid-uuid' - params = { - ""name"": ""test_ilibrary"", - ""is_public"": True - } - params, r = run_api.ilibrary_edit_island(uuid, params=params) - test_assert.status(r, 404) + r = anonymous_exec_api.library_hvmtypes() + result = r.json() + test_assert.status(result, LIBRARY_HVM_TYPE, ""library_hvmtypes"") + test_assert.status(r, 200) " -/ilibrary/rest/edit/{UUID}/,editing an Island library when requested with 
invalid token,"{ -uuid = 'invalid-uuid' -params = { - ""name"": ""test_ilibrary"", - ""is_public"": True -} -}","{ +​/library​/rest​/hvmtypes​/,fetching the hypervisor type when requested with invalid token,,"{ ""status"" : 401, ""message"" : ""Invalid token"" -}","def test_ilibrary_edit_with_invalid_token(invalid_exec_api): - """""" - Editing an Island Library with invalid token - """""" - uuid = 'invalid-uuid' - params = { - ""name"": ""test_ilibrary"", - ""is_public"": True - } - params, r = invalid_exec_api.ilibrary_edit_island(uuid, params=params) - test_assert.status(r, 401) - res = r.json() - assert res['detail'] == ""Invalid token."" -" -/ilibrary/rest/edit/{UUID}/,editing an island library which has next revision,"{ - 'name': 'test', - ""is_public"": False - }","{ -""status"" : 403, -""response"" : forbidden -}","def test_ilibrary_edit_has_next_revision(run_api, ilibrary_add_new_island): - """""" - Editing an Island Library which has next revision - """""" - template, r = ilibrary_add_new_island - isl_uuid = r.json()[""uuid""] - r = run_api.ideploy_deploy(isl_uuid) - deploy_id = r.json()[""deploy_uuid""] - r, rtask_details = run_api.ideploy_snapshot(deploy_id) - run_api.ideploy_delete(deploy_id) - params = { - 'name': 'test', - ""is_public"": False - } - params, r = run_api.ilibrary_edit_island(isl_uuid, params=params) - test_assert.status(r, 403) - run_api.ilibrary_delete(rtask_details['result']['snapshotted_island_uuid'], {}) - r = run_api.ilibrary_delete(isl_uuid, {}) -" -/ilibrary/rest/edit/{UUID}/,editing an Island with no Segments,"{ - ""name"": ""test_ilibrary"", - ""is_public"": True - 'network_segments': {}, - ""is_public"": True - }","{ -""status"" : 201, -""response"" : success -}","def test_ilibrary_edit_with_no_segments(run_api): - """""" - Editing an Island Library with no segments +}","def test_library_hvmtypes_with_invalid_token(invalid_exec_api): """""" - params = { - ""name"": ""test_ilibrary"", - ""is_public"": True - } - params, r = 
run_api.ilibrary_add_new_island(params=params) - rjson = r.json() - uuid = rjson[""uuid""] - params = { - 'network_segments': {}, - ""is_public"": True - } - params, r = run_api.ilibrary_edit_island(uuid, params=params) - test_assert.status(r, 201) - if 'error' not in rjson.keys(): - uuid = rjson[""uuid""] - run_api.ilibrary_delete(uuid, params) + with invalid token + """""" + r = invalid_exec_api.library_hvmtypes() + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) " -/ilibrary/rest/edit/{UUID}/,editing an island-library with no description,"{ - ""name"": ""test_ilibrary"", - ""is_public"": True - ""name"": ""test_ilibrary_edit"", - ""is_public"": True - }","{ -""status"" : 201, -""response"" : success -}","def test_ilibrary_edit_with_no_description(run_api, library_add_new_vm): +/library/rest/layerdetail/{UUID}/,fetching the layer details using a lib_id without Authorization,"{ + lib_id = ""doesnotexits"" +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_library_layerdetail_without_authorization(anonymous_exec_api): """""" - Editing an Island Library with no description + without authorization """""" - params = { - ""name"": ""test_ilibrary"", - ""is_public"": True - } - params, r = run_api.ilibrary_add_new_island(params=params) + lib_id = ""doesnotexits"" + r = anonymous_exec_api.library_layerdetail(lib_id, params={}) + test_assert.status(r, 401) rjson = r.json() - uuid = rjson[""uuid""] - params = { - ""name"": ""test_ilibrary_edit"", - ""is_public"": True - } - params, r = run_api.ilibrary_edit_island(uuid, params=params) - test_assert.status(r, 201) - if 'error' not in rjson.keys(): - uuid = rjson[""uuid""] - run_api.ilibrary_delete(uuid, params) + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) + + " 
-/ilibrary/rest/edit/{UUID}/,editing Island which you are not an owner of but with Admin rights,"{ - ""name"": ""test_ilibrary"", - ""is_public"": True - ""name"": ""test_ilibrary_edit"", - ""is_public"": True - }","{ -""status"" : 201, -""response"" : success -}","def test_ilibrary_edit_admin(skip_if_not_admin, run_api, non_admin_exec_api): +/library/rest/layerdetail/{UUID}/,fetching the layer details using a lib_id for which there is no existing machine,"{ + lib_id = ""doesnotexits"" +}","{ +""status"" : 404, +""response"" : Machine with UUID does not exist +}","def test_library_layerdetails_with_invalid_uid(run_api): """""" - Editing an priate Island Library by admin whose owner is not admin + when machine with UUID does not exists. """""" - params = { - ""name"": ""test_ilibrary"", - ""is_public"": True - } - params, r_isl = non_admin_exec_api.ilibrary_add_new_island(params=params) - rjson_isl = r_isl.json() - ilib_id = rjson_isl[""uuid""] - params = { - ""name"": ""test_ilibrary_edit"", - ""is_public"": True - } - params, r = run_api.ilibrary_edit_island(ilib_id, params=params) - test_assert.status(r, 201) - if 'error' not in rjson_isl.keys(): - r = run_api.ilibrary_delete(ilib_id, {}) + lib_id = ""doesnotexits"" + r = run_api.library_layerdetail(lib_id, params={}) + test_assert.status(r, 404) " -/ilibrary/rest/edit/{UUID}/,updating and deleting a segment name which already exists ,"{ - ""name"": ""test_ilibrary"", - ""is_public"": True, - 'network_segments': { - 'add': [ - { - 'name': 'network1' - } - ] - }, - 'network_segments': { - 'update': [ - { - 'uuid': seg_id, - 'name': 'network2' - } - ], - 'delete': [ - { - 'uuid': seg_id, - } - ] - }, - ""is_public"": True - }","{ -""status"" : 400, -""message"" : ""The Segment shouldn't have both Updation and Deletion in same API call"" -}","def test_ilibrary_edit_update_and_delete_segment_same_name(run_api): +/library/rest/layerdetail/{UUID}/,fetching the layer details using a lib_id but with invalid token,"{ + 
lib_id = ""doesnotexits"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_library_layerdetail_with_invalid_token(invalid_exec_api): """""" - Editing an Island Library update a Segment name which already exists but is being deleted + with invalid token """""" - params = { - ""name"": ""test_ilibrary"", - ""is_public"": True, - 'network_segments': { - 'add': [ - { - 'name': 'network1' - } - ] - } - - } - params, r = run_api.ilibrary_add_new_island(params=params) + lib_id = ""doesnotexits"" + r = invalid_exec_api.library_layerdetail(lib_id, params={}) + test_assert.status(r, 401) rjson = r.json() - uuid = rjson[""uuid""] - segment_list = rjson['network_segments'] - - for segment in segment_list: - if segment['name'] == 'network1': - seg_id = segment['uuid'] - - params = { - 'network_segments': { - 'update': [ - { - 'uuid': seg_id, - 'name': 'network2' - } - ], - 'delete': [ - { - 'uuid': seg_id, - } - ] - }, - ""is_public"": True - } - params, r = run_api.ilibrary_edit_island(uuid, params=params) - test_assert.status(r, 400) - res = r.json() - assert res['error'] == ""The Segment shouldn't have both Updation and Deletion in same API call"" - if 'error' not in rjson.keys(): - uuid = rjson[""uuid""] - run_api.ilibrary_delete(uuid, params) + assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) " -/ilibrary/rest/edit/{UUID}/,updating and deleting same machine,"params3 = { - ""name"": ""test_ilibrary"", - ""machines"": { - ""add"": [ - { - ""uuid"": r1.json()[""uuid""] - } - ] - }, - ""is_public"": False - } -params2 = { - ""machines"": { - ""update"": [ - { - ""uuid"": machine_uuid, - 'description': 'description' - } - ], - 'delete': [ - { - ""uuid"": machine_uuid, - } - ], - }, - ""is_public"": False - }","{ - ""status"": 400, - ""message"": ""A machine cannot have both Deletion and Updation in same API call"" -}","def test_ilibrary_edit_update_and_delete_same_machine(run_api): 
+/library/rest/layerdetail/{UUID}/,fetching the layer details of an existing machine,,"{ +""status"" : 200, +""response"" : details of layer +}","def test_library_layerdetail(library_layerdetail): """""" - Editing an Island Library by updating and deleting same machine + Getting the detail of layer + """""" + template, r = library_layerdetail + result = r.json() + test_assert.status(result, template, ""library_layerdetail"") + test_assert.status(r, 200) +" +/library/rest/layerlist/,requesting to get the list of layer from library ,,"{ +""status"" : 200, +""response"" : list of layer +}","def test_library_layer_list(run_api): + """""" + Getting the list of layer + """""" + r = run_api.library_layer_list() + test_assert.status(r, 200) +" +/library/rest/layerlist/, requesting without Authorization to get the list of layer from library ,,"{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_library_layer_list_without_authorization(anonymous_exec_api): + """""" + without authorization """""" - networks = template_networks() - if run_api.arch_type == ""aarch64"": - params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") - else: - params1, r1 = run_api.library_add_new_vm(networks=networks) - - params3 = { - ""name"": ""test_ilibrary"", - ""machines"": { - ""add"": [ - { - ""uuid"": r1.json()[""uuid""] - } - ] - }, - ""is_public"": False - } - params3, r3 = run_api.ilibrary_add_new_island(params=params3) - rjson3 = r3.json() - uuid = rjson3['uuid'] - machine_uuid = rjson3['machines'][0]['uuid'] - - params2 = { - ""machines"": { - ""update"": [ - { - ""uuid"": machine_uuid, - 'description': 'description' - } - ], - 'delete': [ - { - ""uuid"": machine_uuid, - } - ], - }, - ""is_public"": False - } - params, r = run_api.ilibrary_edit_island(uuid, params=params2) - test_assert.status(r, 400) - res = r.json() - assert res['error'] == ""A machine cannot have both Deletion and 
Updation in same API call"" - if 'error' not in rjson3.keys(): - run_api.ilibrary_delete(uuid, params3) - run_api.library_delete(r1.json()[""uuid""]) - + r = anonymous_exec_api.library_layer_list() + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) " -/ilibrary/rest/edit/{UUID}/,updating machine with no machine name,"{ - ""name"": ""test_ilibrary"", - ""machines"": { - ""add"": [ - { - ""uuid"": rjson[""uuid""] - } - ] - }, - ""is_public"": False, - ""machines"": { - ""update"": [ - { - 'name': """", - ""uuid"": machine_uuid - } - ] - }, - ""is_public"": False - }","{ -""status"" : 201, -""response"" : success -}","def test_ilibrary_edit_with_no_machine_name(run_api, library_add_new_vm): +/library/rest/layerlist/, requesting with invalid token to get the list of layer from the library,,"{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_library_layer_list_with_invalid_token(invalid_exec_api): """""" - Editing an Island Library with no machine name + with invalid token """""" - params, rjson = library_add_new_vm - params = { - ""name"": ""test_ilibrary"", - ""machines"": { - ""add"": [ - { - ""uuid"": rjson[""uuid""] - } - ] - }, - ""is_public"": False - } - params1, r1 = run_api.ilibrary_add_new_island(params=params) - rjson1 = r1.json() - uuid = rjson1['uuid'] - machine_uuid = rjson1['machines'][0]['uuid'] - params = { - ""machines"": { - ""update"": [ - { - 'name': """", - ""uuid"": machine_uuid - } - ] - }, - ""is_public"": False - } - params, r = run_api.ilibrary_edit_island(uuid, params=params) - test_assert.status(r, 201) - if 'error' not in rjson1.keys(): - run_api.ilibrary_delete(uuid, params1) + r = invalid_exec_api.library_layer_list() + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) " 
-/ilibrary/rest/edit/{UUID}/,updating machine with no segment name,"{ - ""name"": ""test_ilibrary"", - ""is_public"": True, - 'network_segments': { - 'update': [ - { - 'uuid': network_segment['uuid'], - 'name': '' - } - ] - - }, - ""is_public"": True - }","{ -""status"" : 400, -""message"" :""This field cannot be blank"" -}","def test_ilibrary_edit_segments_with_no_name(run_api): +/library/rest/list/,requesting the list of VM present in the library without Authorization,,"{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_library_list_without_authorization(anonymous_exec_api): """""" - Editing an Island Library segment with no name + without authorization """""" - params = { - ""name"": ""test_ilibrary"", - ""is_public"": True - } - params, r = run_api.ilibrary_add_new_island(params=params) + r = anonymous_exec_api.library_list(params={}) + test_assert.status(r, 401) rjson = r.json() - uuid = rjson[""uuid""] - network_segment = rjson['network_segments'][0] - params = { - 'network_segments': { - 'update': [ - { - 'uuid': network_segment['uuid'], - 'name': '' - } - ] - - }, - ""is_public"": True - } - params, r = run_api.ilibrary_edit_island(uuid, params=params) - test_assert.status(r, 400) - res = r.json() - assert res['network_segments']['update'][0]['name'] == ['This field may not be blank.'] - if 'error' not in rjson.keys(): - uuid = rjson[""uuid""] - run_api.ilibrary_delete(uuid, params) + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) " -/ilibrary/rest/edit/{UUID}/,updating NIC which is a part of some other machine,"params3 = { - ""name"": ""test_ilibrary"", - ""machines"": { - ""add"": [ - { - ""uuid"": r1.json()[""uuid""] - } - ] - }, - ""is_public"": False - } - -params4 = { - ""machines"": { - ""update"": [ - { - ""uuid"": r1.json()[""uuid""], - ""nics"": { - ""update"": [ - { - 'id': second_nic, - ""model"": ""virtio"" - } - 
] - } - } - ] - }, - ""is_public"": False - }","{ - ""status"": 400, - ""message"": ""The provided nic with id isn't part of this machine"" -}","def test_ilibrary_edit_update_nics_of_other_island(run_api): +/library/rest/list/,requesting the list of VM present in the library with invalid token,,"{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_library_list_with_invalid_token(invalid_exec_api): """""" - Editing an Island Library by updating nic which is part of another island library + with invalid token """""" - networks = template_networks() - if run_api.arch_type == ""aarch64"": - params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") - params2, r2 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") - else: - params1, r1 = run_api.library_add_new_vm(networks=networks) - params2, r2 = run_api.library_add_new_vm(networks=networks) - - # first_nic = r1.json()['hw']['networks'][0]['id'] - second_nic = r2.json()['hw']['networks'][0]['id'] - - params3 = { - ""name"": ""test_ilibrary"", - ""machines"": { - ""add"": [ - { - ""uuid"": r1.json()[""uuid""] - } - ] - }, - ""is_public"": False - } - params3, r3 = run_api.ilibrary_add_new_island(params=params3) - rjson3 = r3.json() - uuid = rjson3['uuid'] - - params3 = { - ""machines"": { - ""update"": [ - { - ""uuid"": r1.json()[""uuid""], - ""nics"": { - ""update"": [ - { - 'id': second_nic, - ""model"": ""virtio"" - } - ] - } - } - ] - }, - ""is_public"": False - } - params, r = run_api.ilibrary_edit_island(uuid, params=params4) - test_assert.status(r, 400) - res = r.json() - assert res['error'] == ""The provided nic with id ["" + str(second_nic) + ""] isn't part of this machine"" - if 'error' not in rjson3.keys(): - run_api.ilibrary_delete(uuid, params) - run_api.library_delete(r1.json()[""uuid""]) - run_api.library_delete(r2.json()[""uuid""]) + r = invalid_exec_api.library_list(params={}) + 
test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) " -/ilibrary/rest/edit/{UUID}/,updating the same machine more than once,"params3 = { - ""name"": ""test_ilibrary"", - ""machines"": { - ""add"": [ - { - ""uuid"": r1.json()[""uuid""] - } - ] - }, - ""is_public"": False - } -params3 = { - ""machines"": { - ""update"": [ - { - ""uuid"": machine_uuid, - 'description': 'description' - }, - { - ""uuid"": machine_uuid, - 'description': 'desc' - } - ] - }, - ""is_public"": False - }","{ - ""status"": 400, - ""message"": ""Updating the same machine more than once may result in an Unexpected value change. Hence, Aborting..."" -}","def test_ilibrary_edit_update_same_machine_twice(run_api): +/library/rest/list/,getting the list of VM present in the library,,"{ +""status"" : 200, +""response"" : ""list of VM +}","def test_library_list(library_list): """""" - Editing an Island Library by updating same machine twice + Getting the list of VM present in the library """""" - networks = template_networks() - if run_api.arch_type == ""aarch64"": - params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") - else: - params1, r1 = run_api.library_add_new_vm(networks=networks) - - params3 = { - ""name"": ""test_ilibrary"", - ""machines"": { - ""add"": [ - { - ""uuid"": r1.json()[""uuid""] - } - ] - }, - ""is_public"": False - } - params3, r3 = run_api.ilibrary_add_new_island(params=params3) - rjson3 = r3.json() - uuid = rjson3['uuid'] - machine_uuid = rjson3['machines'][0]['uuid'] - - params3 = { - ""machines"": { - ""update"": [ - { - ""uuid"": machine_uuid, - 'description': 'description' - }, - { - ""uuid"": machine_uuid, - 'description': 'desc' - } - ] - }, - ""is_public"": False - } - params, r = run_api.ilibrary_edit_island(uuid, params=params4) - test_assert.status(r, 400) - res = r.json() - assert res['error'] == ""Updating the same 
machine more than once may result in an Unexpected value change. Hence, Aborting..."" - if 'error' not in rjson3.keys(): - run_api.ilibrary_delete(uuid, params3) - run_api.library_delete(r1.json()[""uuid""]) - + template, r = library_list + test_assert.status(r, template, ""library_list"", ""name"") + test_assert.status(r, 200) " -/ilibrary/rest/edit/{UUID}/,updating the same segment twice in a single API,"params = { - ""name"": ""test_ilibrary"", - ""is_public"": True, - 'network_segments': { - 'add': [ - { - 'name': 'network1' - } - ] - } - } -params1 = { - 'network_segments': { - 'update': [ - { - 'uuid': seg_id, - 'name': 'network2' - }, - { - 'uuid': seg_id, - 'name': 'network2' - } - ] - }, - ""is_public"": True - }","{ - ""status"": 400, - ""message"": ""Segment name should be unique for an island"" -}","def test_ilibrary_edit_update_same_segment_twice(run_api): +/library/rest/list/,getting list of vm present in library by filtering it based on created and update DateTime,,"{ +""status"" : 400 +}","def test_library_filter_timefilter(run_api: apiops, library_add_new_vm): """""" - Editing an Island Library Update same Segment twice in a single API + Filter on created and update DateTime Filter """""" - params = { - ""name"": ""test_ilibrary"", - ""is_public"": True, - 'network_segments': { - 'add': [ - { - 'name': 'network1' - } - ] - } + template, rjson = library_add_new_vm + lib_id = rjson[""UUID""] + # utime and ctime is returned in the form '2023-09-14T17:59:39.173594Z' which needs conversion to '2023-09-14 17:59:39.173594' + str_ctime = rjson['ctime'].replace('T', ' ').replace('Z', '') + datetime_ctime = convert_datetime_stringform(rjson['ctime']) - } - params, r = run_api.ilibrary_add_new_island(params=params) - rjson = r.json() - uuid = rjson[""uuid""] - segment_list = rjson['network_segments'] + def handle_trigger_delay_filtering_for_created_on(created_start_date, created_end_date, created_date_range, utc=True): + """""" + Function to handle corner 
case if library image was created a day before and test get triggered on new day + """""" + if not utc: + created_start_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + created_end_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": created_start_date, ""created_end_date"": created_end_date, + ""created_date_range"": created_date_range, ""page_size"": 1}).json()['count'] == 1 + # Filter on UTC time + # .... When the datetime is selected to be the same as in detail + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, + ""created_end_date"": str_ctime, ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""page_size"": 1}).json()['count'] == 0 + # ........ When the datetime is selected a 1 microsecond less than from the detail + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), + ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.library_list({""UUID"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""page_size"": 1}).json()['count'] == 1 + # ........ 
When the datetime is selected a 1 microsecond less than from the detail + assert run_api.library_list({""UUID"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), + ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date' and 'created_end_date' when passed blank string + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": """", ""created_end_date"": """", ""page_size"": 1}).json()['count'] == 1 + # ........Filter on 'created_start_date' and 'created_end_date' when created_start_date is greater than created_end_date + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'today + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'yesterday + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 + except AssertionError: + # when machine is created yesterday 
at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'week + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on week's last day at 23:59:59.9999999 and test get triggered on new week at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'month + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on month's last day at 23:59:59.9999999 and test get triggered on new month at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'year' + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on year last day at 23:59:59.9999999 and test get triggered on new year at 00:00:00.0000000 + 
handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # .........When the created_date_range format is invalid + response = run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""invalid"", ""page_size"": 1}) + test_assert.status(response, 400) + assert response.json()['created_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() - for segment in segment_list: - if segment['name'] == 'network1': - seg_id = segment['uuid'] + # .........When the created_start_date and created_end_date has white spaces in them + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": "" "" + str_ctime + "" "", ""created_end_date"": "" "" + str_ctime + "" "", ""page_size"": 1}).json()['count'] == 1 - params = { - 'network_segments': { - 'update': [ - { - 'uuid': seg_id, - 'name': 'network2' - }, - { - 'uuid': seg_id, - 'name': 'network2' - } - ] - }, - ""is_public"": True - } - params, r = run_api.ilibrary_edit_island(uuid, params=params1) - test_assert.status(r, 400) - res = r.json() - assert res['error'] == ""Segment name should be unique for an island"" - if 'error' not in rjson.keys(): - uuid = rjson[""uuid""] - run_api.ilibrary_delete(uuid, params) + # Filter on list time + # .... When the datetime is selected to be the same as in detail but having TimeZone of +0530 + # ........ 
When the datetime is selected to be the same as in detail but with tzone list, i.e., timedelta being 19800 equivalent to +05:30 + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 + # ........ When the datetime is selected to be the same as in detail but with tzone list, i.e., timedelta being 19800 equivalent to +05:30 + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 0 + # ........ When the datetime is selected to be the same as in detail but with tzone list, i.e., timedelta being 19800 equivalent to +05:30 but increase microsecond by 1 to fail filter + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.library_list({""UUID"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 1 + # ........ 
When the datetime is selected a 1 microsecond less than from the detail + assert run_api.library_list({""UUID"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'today + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 list and test get triggered at 00:00:00.0000000 list + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'yesterday + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 list and test get triggered at 00:00:00.0000000 list + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the 
detail and 'created_date_range' is passed as'week + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on week's last day at 23:59:59.9999999 list and test get triggered on new week at 00:00:00.0000000 list + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'month + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on month's last day at 23:59:59.9999999 list and test get triggered on new month at 00:00:00.0000000 list + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'year' + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # 
when machine is created on year last day at 23:59:59.9999999 list and test get triggered on new year at 00:00:00.0000000 list + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # .........When the created_date_range format is invalid + response = run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""invalid"", ""page_size"": 1}) + test_assert.status(response, 400) + assert response.json()['created_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() + # .........When the created_start_date and created_end_date has white spaces in them + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", ""created_end_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", + ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 " -/ilibrary/rest/edit/{UUID}/,updating two segment with same name,"params = { - ""name"": ""test_ilibrary"", - ""is_public"": True, - 'network_segments': { - 'add': [ - { - 'name': 'network1' - }, - { - 'name': 'network2' - } - ] - } - - } -params1 = { - 'network_segments': { - 'update': [ - { - 'uuid': seg1_id, - 'name': 'network3' - }, - { - 'uuid': seg2_id, - 'name': 'network3' - } - ] - }, - ""is_public"": True - }","{ - ""status"": 400, - ""message"": ""Segment name should be unique for an island"" -}","def test_ilibrary_edit_update_two_segment_same_name(run_api): +/library/rest/list/,filtering the list of library details based on created and updated DateTime 
Filter,,,"def test_library_filter_timefilter(run_api: apiops, library_add_new_vm): """""" - Editing an Island Library Update Segment with same name + Filter on created and update DateTime Filter """""" - params = { - ""name"": ""test_ilibrary"", - ""is_public"": True, - 'network_segments': { - 'add': [ - { - 'name': 'network1' - }, - { - 'name': 'network2' - } - ] - } - - } - params, r = run_api.ilibrary_add_new_island(params=params) - rjson = r.json() - uuid = rjson[""uuid""] - segment_list = rjson['network_segments'] + template, rjson = library_add_new_vm + lib_id = rjson[""UUID""] + # utime and ctime is returned in the form '2023-09-14T17:59:39.173594Z' which needs conversion to '2023-09-14 17:59:39.173594' + str_ctime = rjson['ctime'].replace('T', ' ').replace('Z', '') + datetime_ctime = convert_datetime_stringform(rjson['ctime']) - for segment in segment_list: - if segment['name'] == 'network1': - seg1_id = segment['uuid'] - if segment['name'] == 'network2': - seg2_id = segment['uuid'] + def handle_trigger_delay_filtering_for_created_on(created_start_date, created_end_date, created_date_range, utc=True): + """""" + Function to handle corner case if library image was created a day before and test get triggered on new day + """""" + if not utc: + created_start_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + created_end_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": created_start_date, ""created_end_date"": created_end_date, + ""created_date_range"": created_date_range, ""page_size"": 1}).json()['count'] == 1 + # Filter on UTC time + # .... 
When the datetime is selected to be the same as in detail + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, + ""created_end_date"": str_ctime, ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""page_size"": 1}).json()['count'] == 0 + # ........ When the datetime is selected a 1 microsecond less than from the detail + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), + ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased + # ........ When the datetime is selected a 1 microsecond more than from the detail + assert run_api.library_list({""UUID"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""page_size"": 1}).json()['count'] == 1 + # ........ 
When the datetime is selected a 1 microsecond less than from the detail + assert run_api.library_list({""UUID"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), + ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date' and 'created_end_date' when passed blank string + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": """", ""created_end_date"": """", ""page_size"": 1}).json()['count'] == 1 + # ........Filter on 'created_start_date' and 'created_end_date' when created_start_date is greater than created_end_date + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'today + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'yesterday + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 + except AssertionError: + # when machine is created yesterday 
at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'week + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on week's last day at 23:59:59.9999999 and test get triggered on new week at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'month + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on month's last day at 23:59:59.9999999 and test get triggered on new month at 00:00:00.0000000 + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'year' + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on year last day at 23:59:59.9999999 and test get triggered on new year at 00:00:00.0000000 + 
handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") + # .........When the created_date_range format is invalid + response = run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""invalid"", ""page_size"": 1}) + test_assert.status(response, 400) + assert response.json()['created_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() - params1 = { - 'network_segments': { - 'update': [ - { - 'uuid': seg1_id, - 'name': 'network3' - }, - { - 'uuid': seg2_id, - 'name': 'network3' - } - ] - }, - ""is_public"": True - } - params, r = run_api.ilibrary_edit_island(uuid, params=params1) - test_assert.status(r, 400) - res = r.json() - assert res['error'] == ""Segment name should be unique for an island"" - if 'error' not in rjson.keys(): - uuid = rjson[""uuid""] - run_api.ilibrary_delete(uuid, params) + # .........When the created_start_date and created_end_date has white spaces in them + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": "" "" + str_ctime + "" "", ""created_end_date"": "" "" + str_ctime + "" "", ""page_size"": 1}).json()['count'] == 1 -" -/ilibrary/rest/edit/{UUID}/,updating two segments giving same name,"params = { - ""name"": ""test_ilibrary"", - ""is_public"": True, - 'network_segments': { - 'add': [ - { - 'name': 'network1' - }, - { - 'name': 'network2' - } - ] - }, - } -params1 = { - 'network_segments': { - 'update': [ - { - 'uuid': seg_id, - 'name': 'network2' - } - ] - }, - ""is_public"": True - }","{ - ""status"": 400, - ""message"": ""The segment name(s) is/are already taken for other island"" -}","def test_ilibrary_edit_update_segment_with_existing_name(run_api): - """""" - Editing an Island Library segment with existing name - """""" - params = { - ""name"": ""test_ilibrary"", - ""is_public"": 
True, - 'network_segments': { - 'add': [ - { - 'name': 'network1' - }, - { - 'name': 'network2' - } - ] - }, - } - params, r = run_api.ilibrary_add_new_island(params=params) - rjson = r.json() - uuid = rjson[""uuid""] - segment_list = rjson['network_segments'] + # Filter on list time + # .... When the datetime is selected to be the same as in detail but having TimeZone of +0530 + # ........ When the datetime is selected to be the same as in detail but with tzone list, i.e., timedelta being 19800 equivalent to +05:30 + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 + # ........ When the datetime is selected to be the same as in detail but with tzone list, i.e., timedelta being 19800 equivalent to +05:30 + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 0 + # ........ When the datetime is selected to be the same as in detail but with tzone list, i.e., timedelta being 19800 equivalent to +05:30 but increase microsecond by 1 to fail filter + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 1 + # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 + # ........ 
When the datetime is selected a 1 microsecond more than from the detail + assert run_api.library_list({""UUID"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 1 + # ........ When the datetime is selected a 1 microsecond less than from the detail + assert run_api.library_list({""UUID"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 0 + # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'today + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created yesterday at 23:59:59.9999999 list and test get triggered at 00:00:00.0000000 list + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'yesterday + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 + except AssertionError: + # when machine is created yesterday at 
23:59:59.9999999 list and test get triggered at 00:00:00.0000000 list + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'week + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on week's last day at 23:59:59.9999999 list and test get triggered on new week at 00:00:00.0000000 list + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'month + try: + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on month's last day at 23:59:59.9999999 list and test get triggered on new month at 00:00:00.0000000 list + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'year' + try: + assert run_api.library_list({""UUID"": lib_id, 
""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 + except AssertionError: + # when machine is created on year last day at 23:59:59.9999999 list and test get triggered on new year at 00:00:00.0000000 list + handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) + # .........When the created_date_range format is invalid + response = run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""invalid"", ""page_size"": 1}) + test_assert.status(response, 400) + assert response.json()['created_date_range'][0] == 'Select a valid choice. 
invalid is not one of the available choices.', ""The json is %s"" % response.json() + # .........When the created_start_date and created_end_date has white spaces in them + assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", ""created_end_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", + ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 - for segment in segment_list: - if segment['name'] == 'network1': - seg_id = segment['uuid'] - params1 = { - 'network_segments': { - 'update': [ - { - 'uuid': seg_id, - 'name': 'network2' - } - ] - }, - ""is_public"": True - } - params, r = run_api.ilibrary_edit_island(uuid, params=params1) - test_assert.status(r, 400) - res = r.json() - assert res['error'] == ""The segment name(s) {'network2'} is/are already taken for the island"" - if 'error' not in rjson.keys(): - uuid = rjson[""uuid""] - run_api.ilibrary_delete(uuid, params) -" -/ilibrary/rest/list/,fetching list of details of ilibrary,,"{ -""status"" : 200, -""response"" : success -}","def test_ilibrary_list(ilibrary_list): - """""" - Getting the lists of Island Library - """""" - r = ilibrary_list - # test_assert.status(r, template, ""library_list"", ""name"") - test_assert.status(r, 200) " -/ilibrary/rest/list/,fetching list of details of ilibrary by a non-admin user,,"{ -""status"" : 200, -""response"" : success -}","@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) -def test_ilibrary_details_non_admin(skip_if_admin, custom_ilib_admin_operations, run_api): +/library/rest/list/,fetching the list of virtual machine with added Date Time filter using the fetch_all_rev parameter,,,"def test_library_timefilter_fetch_all_rev(run_api): """""" - Details of Ilibrary by non-Admin + Filter on DateTime filter using 'fetch_all_rev' """""" - # Non-admin 
check for fetching details of the Ilibrary created by different user. - ilibrary_id = custom_ilib_admin_operations - r = run_api.ilibrary_list_island({""uuid"": ilibrary_id}) - test_assert.status(r, 200) - assert r.json()[""count""] == 0 -" -/ilibrary/rest/list/,fetching list of details of ilibrary by adding filters on created and update DateTime filter,,"{ -""status"" : 200, -""response"" : success , filtered list provided -}"," -def test_ilibrary_filter_timefilter(run_api: apiops, ilibrary_add_new_island): + vmname = f""fetch_rev.{datetime.now()}"" + params, r = run_api.library_add_new_vm(name=vmname) + lib_id = r.json()[""uuid""] + # Str_ctime for the lower revision machine + str_ctime_lower_revision = r.json()['ctime'].replace('T', ' ').replace('Z', '') + x = run_api.deploy_image(lib_id=lib_id) + machine_id = x.json()[""uuid""] + res = run_api.deploy_snapshot(deploy_id=machine_id) + snapshotted_machine_uuid = res.json()[""snapshotted_machine_uuid""] + # str_ctime for the upper revison machine after snapshotting + str_ctime_upper_revision = run_api.library_details(uuid=snapshotted_machine_uuid, params={}).json()['ctime'].replace('T', ' ').replace('Z', '') + # ........When the tag 'fetch_all_rev' is set to true + response = run_api.library_list({""name"": vmname, ""created_start_date"": str_ctime_lower_revision, ""created_end_date"": str_ctime_upper_revision, + ""fetch_all_revs"": ""true""}).json() + assert response[""count""] == 2, ""The json is %s"" % response + run_api.deploy_image_delete(deploy_id=machine_id) + run_api.library_delete(uuid=snapshotted_machine_uuid, params={""full_tree"": True})" +/library/rest/list/,fetching the list of virtual machine with added Date Time filter,,,"def test_library_filter_timefilter(run_api: apiops, library_add_new_vm): """""" Filter on created and update DateTime Filter """""" - template, r = ilibrary_add_new_island - rjson = r.json() - ilib_id = rjson[""uuid""] + template, rjson = library_add_new_vm + lib_id = 
rjson[""uuid""] # utime and ctime is returned in the form '2023-09-14T17:59:39.173594Z' which needs conversion to '2023-09-14 17:59:39.173594' str_ctime = rjson['ctime'].replace('T', ' ').replace('Z', '') datetime_ctime = convert_datetime_stringform(rjson['ctime']) def handle_trigger_delay_filtering_for_created_on(created_start_date, created_end_date, created_date_range, utc=True): """""" - Function to handle corner case if ilibrary image was created a day before and test get triggered on new day + Function to handle corner case if library image was created a day before and test get triggered on new day """""" if not utc: created_start_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' created_end_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' - assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": created_start_date, ""created_end_date"": created_end_date, - ""created_date_range"": created_date_range, ""page_size"": 1}).json()['count'] == 1 + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": created_start_date, ""created_end_date"": created_end_date, + ""created_date_range"": created_date_range, ""page_size"": 1}).json()['count'] == 1 # Filter on UTC time # .... When the datetime is selected to be the same as in detail - assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": str_ctime, - ""created_end_date"": str_ctime, ""page_size"": 1}).json()['count'] == 1 + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": str_ctime, + ""created_end_date"": str_ctime, ""page_size"": 1}).json()['count'] == 1 # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased # ........ 
When the datetime is selected a 1 microsecond more than from the detail - assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), - ""page_size"": 1}).json()['count'] == 0 + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""page_size"": 1}).json()['count'] == 0 # ........ When the datetime is selected a 1 microsecond less than from the detail - assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), - ""page_size"": 1}).json()['count'] == 1 + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), + ""page_size"": 1}).json()['count'] == 1 # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased # ........ When the datetime is selected a 1 microsecond more than from the detail - assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), - ""page_size"": 1}).json()['count'] == 1 + assert run_api.library_list({""uuid"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""page_size"": 1}).json()['count'] == 1 # ........ 
When the datetime is selected a 1 microsecond less than from the detail - assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), - ""page_size"": 1}).json()['count'] == 0 + assert run_api.library_list({""uuid"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), + ""page_size"": 1}).json()['count'] == 0 # ........Filter on 'created_start_date' and 'created_end_date' when passed blank string - assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": """", ""created_end_date"": """", ""page_size"": 1}).json()['count'] == 1 + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": """", ""created_end_date"": """", ""page_size"": 1}).json()['count'] == 1 # ........Filter on 'created_start_date' and 'created_end_date' when created_start_date is greater than created_end_date - assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), - ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), ""page_size"": 1}).json()['count'] == 0 + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), + ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), ""page_size"": 1}).json()['count'] == 0 # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. 
# ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'today try: - assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 except AssertionError: # when machine is created yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'yesterday try: - assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 except AssertionError: # when machine is created yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'week try: - assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 + assert run_api.library_list({""uuid"": lib_id, 
""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 except AssertionError: # when machine is created on week's last day at 23:59:59.9999999 and test get triggered on new week at 00:00:00.0000000 handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'month try: - assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 except AssertionError: # when machine is created on month's last day at 23:59:59.9999999 and test get triggered on new month at 00:00:00.0000000 handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'year' try: - assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 except AssertionError: # when machine is created on year last day at 23:59:59.9999999 and test get triggered on new year at 00:00:00.0000000 handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, 
created_end_date=str_ctime, created_date_range=""yesterday"") # .........When the created_date_range format is invalid - response = run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""invalid"", ""page_size"": 1}) + response = run_api.library_list({""uuid"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""invalid"", ""page_size"": 1}) test_assert.status(response, 400) assert response.json()['created_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() + # .........When the created_start_date and created_end_date has whitespaces in them - assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": "" "" + str_ctime + "" "", ""created_end_date"": "" "" + str_ctime + "" "", ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": "" "" + str_ctime + "" "", ""created_end_date"": "" "" + str_ctime + "" "", ""page_size"": 1}).json()['count'] == 1 # Filter on IST time # .... When the datetime is selected to be the same as in detail but having TimeZone of +0530 # ........ 
When the datetime is selected to be the same as in detail but with tzone IST, i.e., timedelta being 19800 equivalent to +05:30 - assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""page_size"": 1}).json()['count'] == 1 + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""page_size"": 1}).json()['count'] == 1 # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 # ........ When the datetime is selected to be the same as in detail but with tzone IST, i.e., timedelta being 19800 equivalent to +05:30 - assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', - ""page_size"": 1}).json()['count'] == 0 + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 0 # ........ 
When the datetime is selected to be the same as in detail but with tzone IST, i.e., timedelta being 19800 equivalent to +05:30 but increase microsecond by 1 to fail filter - assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', - ""page_size"": 1}).json()['count'] == 1 + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 1 # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 # ........ When the datetime is selected a 1 microsecond more than from the detail - assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', - ""page_size"": 1}).json()['count'] == 1 + assert run_api.library_list({""uuid"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 1 # ........ When the datetime is selected a 1 microsecond less than from the detail - assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', - ""page_size"": 1}).json()['count'] == 0 + assert run_api.library_list({""uuid"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', + ""page_size"": 1}).json()['count'] == 0 # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. 
# ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'today try: - assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 except AssertionError: - # when machine is created yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 + # when machine is created yesterday at 23:59:59.9999999 IST and test get triggered at 00:00:00.0000000 IST handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'yesterday try: - assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""yesterday"", 
""page_size"": 1}).json()['count'] == 0 except AssertionError: # when machine is created yesterday at 23:59:59.9999999 IST and test get triggered at 00:00:00.0000000 IST handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'week try: - assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 except AssertionError: # when machine is created on week's last day at 23:59:59.9999999 IST and test get triggered on new week at 00:00:00.0000000 IST handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'month try: - assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": 
convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 except AssertionError: # when machine is created on month's last day at 23:59:59.9999999 IST and test get triggered on new month at 00:00:00.0000000 IST handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'year' try: - assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 except AssertionError: # when machine is created on year last day at 23:59:59.9999999 IST and test get triggered on new year at 00:00:00.0000000 IST handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) # .........When the created_date_range format is invalid - response = run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + 
timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""invalid"", ""page_size"": 1}) + response = run_api.library_list({""uuid"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', + ""created_date_range"": ""invalid"", ""page_size"": 1}) test_assert.status(response, 400) assert response.json()['created_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() # .........When the created_start_date and created_end_date has whitespaces in them - assert run_api.ilibrary_list_island({""uuid"": ilib_id, ""created_start_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", ""created_end_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", - ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + assert run_api.library_list({""uuid"": lib_id, ""created_start_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", ""created_end_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", + ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + " -/ilibrary/rest/list/,fetching list of details of ilibrary for some existing UUID,,"{ -""status"" : 200, -""response"" : success , list provided -}","def test_ilibrary_list_with_uuid(run_api, ilibrary_add_new_island): +/library/rest/list/,fetching the list of virtual machine ,,,"@pytest.mark.parametrize(""lib_filter_kwargs"", [{""vm_names"": [f""{prefix_name}{rand_string()}"" for _ in range(library_count)]}], indirect=True) +def test_library_list_filter(run_api: apiops, lib_filter_kwargs): """""" - Fetch ilibrary list using uuid + Getting the 
list of VM present in the library by adding filters """""" - params, r = ilibrary_add_new_island - ilib_uuid = r.json()[""uuid""] - result = run_api.ilibrary_list_island(params={""uuid"": ilib_uuid}) - test_assert.status(result, 200) + templates, res = lib_filter_kwargs + # check for valid response data with the filter parameters + filter_on_input_result(run_api, library_count, templates, res, prefix_name, run_api.library_list) + + " -/ilibrary/rest/list/,fetching list of details of ilibrary using the name parameter,,"{ -""status"" : 200, -""response"" : success, list with specific name provided -}","def test_ilibrary_list_with_name(run_api, ilibrary_add_new_island): +/library/rest/list/,fetching the details list from library using the disks_uuid parameter,,"{ +""reponse"" :success +}","def test_library_list_fetch_with_disk_uuid(library_add_new_vm, run_api): """""" - Fetch ilibrary list valid name + Fetch list with 'disks_uuid' param """""" - params, r = ilibrary_add_new_island - lib_name = r.json()[""name""] - result = run_api.ilibrary_list_island(params={""name"": lib_name}) - test_assert.status(result, 200) + template, rjson = library_add_new_vm + params = {""disk_uuid"": rjson['hw']['disks'][0]['uuid']} + assert run_api.library_list(params).json()['count'] == 1 " -/ilibrary/rest/list/,fetching list of details of ilibrary when some UUID is provided that does not exist ,"{ -uuid = ""invalid +/library/rest/list/,fetching the details list from library using the mac parameter,"{ +mac = ""5A:54:00:12:23:34"" }","{ -""status"" : 200, -""response"" : success , empty list -}","def test_ilibrary_list_with_invalid_uuid(run_api): +""reponse"" :success +}","def test_library_list_fetch_with_mac(library_add_new_vm, run_api): """""" - Fetch ilibrary list using invalid uuid + Fetch list with 'mac' param """""" - uid = ""invalid"" - r = run_api.ilibrary_list_island(params={""uuid"": uid}) - test_assert.status(r, 200) + template, rjson = library_add_new_vm + mac = 
""5A:54:00:12:23:34"" + params = {""mac"": mac} + rjson = run_api.library_list(params).json() + for machines in rjson['results']: + all_macs = [netwok['mac'] for netwok in machines['hw']['networks']] + assert mac in all_macs, ""Json |> %s"" % machines +" +/library/rest/list/,fetching the details list from library using the arch parameter,,"{ +""reponse"" :success +}","def test_library_fetch_with_arch(library_add_new_vm, run_api): + """""" + Fetch list with 'arch' + """""" + params, rjson = library_add_new_vm + arch = rjson['hw']['arch'] + params = {""arch"": arch} + rjson = run_api.library_list(params).json() + for machines in rjson['results']: + assert machines['hw']['arch'] == arch, ""Json |> %s"" % machines +" +/library/rest/list/,fetching the details list from library with tags,,"{ +""reponse"" :success +}","def test_library_fetch_with_tags(library_add_new_vm, run_api): + """""" + Fetch list with tags + """""" + # using _sessionid + params, rjson = library_add_new_vm + # using tag + vm_uuid = rjson['uuid'] + tag_params, result = run_api.tag_add(vm_uuid) + tag_value = tag_params['tag_list'][-1]['value'] + params = {""_sessionid"": rjson['tags'][0][""value""], ""tags"": tag_value} + assert run_api.library_list(params).json()['count'] == 1 " -/ilibrary/rest/list/,"fetching list of details of ilibrary with added filters. Check the user type before performing the operation. 
-",,"{ -""status"" : 200, -""response"" : success , filtered list -}","def test_ilibrary_list_filter(run_api): +/library/rest/list/,fetching the filtered details list from library using search parameter,,"{ +""reponse"" :success +}","def test_library_list_with_search_contains_uuid(library_add_new_vm, run_api): """""" - Getting the lists of Island Library by adding filters + fetch list search filter """""" - params, res = [], [] - ilibrary_count = 10 - arch = run_api.arch_type - prefix_name = f""filter_island_1_{rand_string()}_"" - isl_lib_name = [f""{prefix_name}{rand_string()}"" for _ in range(ilibrary_count)] - networks = template_networks() - if arch == ""aarch64"": - params1, r1 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") - params2, r2 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") - params3, r3 = run_api.library_add_new_vm(networks=networks, arch=""aarch64"", type=""virtio"", port=""vda"") - else: - params1, r1 = run_api.library_add_new_vm(networks=networks) - params2, r2 = run_api.library_add_new_vm(networks=networks) - params3, r3 = run_api.library_add_new_vm(networks=networks) - machine1 = { - ""uuid"": r1.json()[""uuid""], - ""nic_update_id"": r1.json()[""hw""][""networks""][0][""id""], - ""nic_delete_id"": r1.json()[""hw""][""networks""][2][""id""] - } - machine2 = { - ""uuid"": r2.json()[""uuid""], - ""nic_update_id"": r2.json()[""hw""][""networks""][1][""id""], - ""nic_delete_id"": r2.json()[""hw""][""networks""][0][""id""] - } - machine3 = { - ""uuid"": r3.json()[""uuid""], - ""nic_update_id"": r3.json()[""hw""][""networks""][2][""id""], - ""nic_delete_id"": r3.json()[""hw""][""networks""][1][""id""] - } - for i in range(ilibrary_count): - param, r = run_api.ilibrary_add_new_island(machine1=machine1, machine2=machine2, - machine3=machine3, name=isl_lib_name[i]) - params.append(param) - res.append(r) - random_int = randint(0, 9) - name_filter = 
{""name"": res[random_int].json().get(""name""), ""page_size"": ilibrary_count} - uuid_filter = {""uuid"": res[random_int].json().get(""uuid""), ""page_size"": ilibrary_count} - owner_filter = {""owner"": ""colama"" if run_api.user_type == ""admin"" - else ""vivekt"" if run_api.user_type == ""non-admin"" - else ""manager"", ""search"": prefix_name, ""page_size"": ilibrary_count} - island_type_filter = {""island_type"": choice([""private"", ""public""]), ""search"": prefix_name, ""page_size"": ilibrary_count} - filters = [name_filter, uuid_filter, owner_filter, island_type_filter] - exp_res = { - 0: [i.get(""name"") for i in params if i.get(""name"") == name_filter.get(""name"")], - 1: [i.json().get(""uuid"") for i in res if i.json().get(""uuid"") == uuid_filter.get(""uuid"")], - 2: [i.json().get(""owner"") for i in res], - 3: [i.json().get(""island_type"") for i in res if i.json().get(""island_type"") == island_type_filter.get(""island_type"")] - } - for filter in range(len(filters)): - r = run_api.ilibrary_list_island(filters[filter]) - # check for valid response data with the filter parameters - if len(r.json().get(""results"")) != len(exp_res[filter]): - logging.error(f""error in filter: {filters[filter]}, the list of expected result for the filter is: {exp_res[filter]}, and the actual result is {r.json()}"") - assert False + p, r = library_add_new_vm + uuid = r['uuid'] + params = {'search': f""uuid={uuid}""} + assert run_api.library_list(params).json()['count'] == 1 +" +/library/rest/list/,fetching the details list from library using invalid value of scope parameter,"{ +'scope': ""invaild"", +'uuid' +}",,"def test_library_list_with_invaild_scope_name(run_api, library_add_new_vm): + """""" + fetch list with invaild scope name + """""" + p, r = library_add_new_vm + lib_id = r['uuid'] + params = {'scope': ""invaild"", 'uuid': r['uuid']} + rjson = run_api.library_list(params).json() # 'all' is default scope gets applied on invalid scope + for machines in 
rjson['results']: + assert machines['uuid'] == lib_id, ""Json |> %s"" % machines - test_assert.status(r, 200) - run_api.library_delete(r1.json()[""uuid""], params1) - run_api.library_delete(r2.json()[""uuid""], params2) - run_api.library_delete(r3.json()[""uuid""], params3) - for i in range(ilibrary_count): - rjson = res[i].json() - if 'error' not in rjson.keys(): - uuid = rjson[""uuid""] - run_api.ilibrary_delete(uuid, params[i]) " -/ilibrary/rest/list/,fetching list of details of ilibrary with name that does not exist,"{ -name = ""invalid +/library/rest/list/,"fetching the details list from library using ""scope"" parameter","{ +'scope': ""public"" }","{ -""status"" : 200, -""response"" : success , empty list -}","def test_ilibrary_list_with_invalid_name(run_api): +""reponse"" :success +}","def test_library_list_with_public_scope(run_api, library_add_new_vm): """""" - Fetch ilibrary list using invalid name + fetch list with public scope name """""" - r = run_api.ilibrary_list_island(params={""name"": rand_string() + ""$$""}) - result = r.json() - test_assert.status(r, 200) - assert result[""count""] == 0 + p, r = library_add_new_vm + params = {'scope': ""public""} + rjson = run_api.library_list(params).json() + for machines in rjson['results']: + assert machines['is_public'] is True, ""Json |> %s"" % machines " -/ilibrary/rest/list/,fetching list of details of ilibrary without providing any specific params,,"{ +/library/rest/list/,"fetching the details list from library using ""kvm_type"" parameter","{ +""hvm_type"" +}","{ +""reponse"" :success +}","def test_library_fetch_with_kvm_type(library_add_new_vm, run_api): + """""" + Fetch list with 'kvm_type' + """""" + params, rjson = library_add_new_vm + kvm = rjson['hw']['hvm_type'] + params = {""hvm_type"": kvm} + rjson = run_api.library_list(params).json() + for machines in rjson['results']: + assert machines['hw']['hvm_type'] == kvm, ""Json |> %s"" % machines +" +/library/rest/list/,fetching the details list from 
library with ISO,"cdrom = [{ + ""type"": ""sata"", + ""iso"": rand_string(), + ""is_boot"": True, + ""boot_order"": 1 + } + ]","{ +""reponse"" :success +}","def test_library_fetch_with_iso(run_api): + """""" + Fetch list with 'iso' + """""" + cdrom = [{ + ""type"": ""sata"", + ""iso"": rand_string(), + ""is_boot"": True, + ""boot_order"": 1 + } + ] + params, r = run_api.library_add_new_vm(cdrom=cdrom) + rjson = r.json() + params = {""iso"": rjson['hw']['cdrom'][-1]['iso']} + assert run_api.library_list(params).json()['count'] == 1 + run_api.library_delete(rjson['uuid']) +" +/library/rest/nmodeltypes/,updation of cd rom in a library,,"{ ""status"" : 200, -""response"" : success , list provided -}","def test_ilibrary_list_without_params(run_api, ilibrary_add_new_island): - """""""""""" - Lists all the Island Library - """""""""""" - params, r = ilibrary_add_new_island - r = run_api.ilibrary_list_island() +""response"" : Types of Network Model +}","def test_library_nmodeltypes(run_api): + """""" + Getting the type of Network Model + """""" + r = run_api.library_nmodeltypes() + result = r.json() + test_assert.status(result, LIBRARY_NETWORK_MODEL_TYPE, ""library_nmodeltypes"") test_assert.status(r, 200) " -/ilibrary/rest/list/,fetching list of details of ilibrary without token and authorization,,"{ +/library/rest/nmodeltypes/,requesting the types of NetworkModel using with invalid token,,"{ ""status"" : 401, -""message"" : ""Authentication credentials were not provided."" -}","def test_ilibrary_list_without_token(anonymous_exec_api): +""message"" : ""Invalid token"" +}","def test_library_nmodeltypes_with_invalid_token(invalid_exec_api): """""" - Fetch ilibrary list without token + with invalid token """""" - r = anonymous_exec_api.ilibrary_list_island() - result = r.json() + r = invalid_exec_api.library_nmodeltypes() test_assert.status(r, 401) - assert result['detail'] == ""Authentication credentials were not provided."" + rjson = r.json() + assert rjson['detail'] == 
""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) " -/ilibrary/rest/list/,fetching list of details of island library by an admin user,,"{ +/library/rest/nmodeltypes/,requesting the types of NetworkModel without Authorization,,"{ ""status"" : 200, -""response"" : success -}","PARAMETERS = [{""dest_obj"": OBJ_ISL}] - - -@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) -def test_ilibrary_details_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): +""response"" : Types of Network Model +}","def test_library_nmodeltypes_without_authorization(anonymous_exec_api): """""" - Details of Ilibrary by Admin + without authorization """""" - # Admin check for fetching details of the Ilibrary created by different user. - ilibrary_id = custom_ilib_non_admin_operations - r = run_api.ilibrary_list_island({""uuid"": ilibrary_id}) + r = anonymous_exec_api.library_nmodeltypes() test_assert.status(r, 200) - assert r.json()[""count""] == 0 + result = r.json() + test_assert.status(result, LIBRARY_NETWORK_MODEL_TYPE, ""library_nmodeltypes"") " -/ilibrary/rest/revisions/,getting the list of revisions in ilibrary when Island UUID does not exist,"{ - uuid = 'invalid-island-library-uuid' -}","{ - ""status"": 404, - ""response"": not found -}","def test_ilibrary_revisions_invalid_uuid(run_api): +/library/rest/ntypes/,requesting the network type list,,200: Network Type List,"def test_library_ntypes(run_api): """""" - Getting the lists of revisions in Island Library with invalid uuid + Getting the list of Network type """""" - uuid = 'invalid-island-library-uuid' - r = run_api.ilibrary_revisions(uuid) - test_assert.status(r, 404) + r = run_api.library_ntypes() + result = r.json() + test_assert.status(result, LIBRARY_NETWORK_TYPE, ""library_ntypes"") + test_assert.status(r, 200) " -/ilibrary/rest/revisions/,getting the list of revisions in ilibrary without Authorization,"{ - uuid = 'valid-island-library-uuid' 
+/library/rest/ntypes/,fetching the list of network types of library when requested with invalid token,,"{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_library_ntypes_with_invalid_token(invalid_exec_api): + """""" + with invalid token + """""" + r = invalid_exec_api.library_ntypes() + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) +" +/library/rest/revisions/,requesting the revision list of library without Authorization,"{ +machine_UUID : 'doesnotexits' }","{ ""status"" : 401, -""message"" : ""Authentication credentials were not provided."" -}","def test_ilibrary_revisions_without_authorization(anonymous_exec_api): +""message"" : ""Authentication credentials were not provided"" +}","def test_library_revisions_without_authorization(anonymous_exec_api): """""" - Getting the lists of revisions in Island Library without authorization + without authorization """""" - uuid = 'valid-island-library-uuid' - r = anonymous_exec_api.ilibrary_revisions(uuid) + r = anonymous_exec_api.library_revisions('doesnotexits') test_assert.status(r, 401) - res = r.json() - assert res['detail'] == 'Authentication credentials were not provided.' 
+ rjson = r.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) " -/ilibrary/rest/revisions/,getting the list of revisions in ilibrary when requested using invalid token,"{ - uuid = 'invalid-island-library-uuid' +/library/rest/revisions/,requesting the revision list of library when machine with the provided UUID does not exist,"{ +machine_UUID : 'doesnotexits' +}","{ +""status"" : 404, +""message"" : ""Machine with given UUID does not exist"" +}","def test_library_revisions_with_invaild_UUID(run_api): + """""" + library revision machine does not exist + """""" + r = run_api.library_revisions('doesnotexits') + test_assert.status(r, 404) + rjson = r.json() + assert rjson['detail'] == ""Machine with given UUID does not exist"", ""|> The error message is %s"" % rjson +" +/library/rest/revisions/,requesting the revision list of library when invalid token provided,"{ +machine_UUID : 'doesnotexits' }","{ ""status"" : 401, ""message"" : ""Invalid token"" -}","def test_ilibrary_revisions_invalid_token(invalid_exec_api): +}","def test_library_revisions_with_invalid_token(invalid_exec_api): """""" - Getting the lists of revisions in Island Library with invalid token + with invalid token """""" - uuid = 'invalid-island-library-uuid' - r = invalid_exec_api.ilibrary_revisions(uuid) + r = invalid_exec_api.library_revisions('doesnotexits') test_assert.status(r, 401) - res = r.json() - assert res['detail'] == 'Invalid token.' 
+ rjson = r.json() + assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) " -/ilibrary/rest/revisions/,getting the list of revisions in ilibrary with filters,"FILTERS = [ - { - 'page': 1 - }, - { - 'page_size': 1 - }, - { - 'page': 1, - 'page_size': 1 - } -]","{ - ""status"": 200, - ""response"": revision list provided -}","FILTERS = [ - { - 'page': 1 - }, - { - 'page_size': 1 - }, - { - 'page': 1, - 'page_size': 1 - } -] - - -@pytest.mark.parametrize('filter', FILTERS) -def test_ilibrary_revisions_page_num(run_api, ideploy_deploy, filter): +/library/rest/revisions/,requesting the revision list of library,,"{ +""status"" : 200, +""response"" : Revision list of library +}","def test_library_revisions(library_revisions): """""" - Getting the lists of revisions in Island Library adding filters + revision list of library """""" - params, r = ideploy_deploy - x = r.json() - deploy_id = x[""deploy_uuid""] - r, rtask_details = run_api.ideploy_snapshot(deploy_id) - snapshot_id = rtask_details['result']['snapshotted_island_uuid'] - r = run_api.ilibrary_revisions(snapshot_id, filter) + params, r = library_revisions test_assert.status(r, 200) - run_api.ilibrary_delete(snapshot_id, {}) " -/ilibrary/rest/revisions/,getting the list of revisions in island library ,,"{ - ""status"": 200, - ""response"": revision list provided -}","def test_ilibrary_revisions(ilibrary_revisions): +/library/rest/segment_list/,fetching of segment list from library,,"{ +""status"" :200, +""response"" :success +}","def test_library_segmentlist(library_segmentlist): """""" - Getting the lists of revisions in Island Library + fetch segment list """""" - r = ilibrary_revisions + params, r = library_segmentlist test_assert.status(r, 200) " -/library/rest/add,adding vm to library when disks of IDE type are passed with is_uefi set to True,"disks = [{""size"": 20, ""port"": ""hda"", ""type"": ""ide"", ""format"": ""qcow2"", ""is_boot"": False}] -","{ 
-""status"" : 400, -""response"" : Bad request -}"," -def test_add_vm_to_library_ide_type_passed_with_uefi_true(run_api): +/library/rest/segment_list/,fetch segment list for library for brigde type of NIC,"{ +'nic_type': 'bridge' +}",,"def test_library_segment_with_nic_type(library_add_new_vm, run_api): """""" - if ide type passed with uefi true + Fetch library segment with nic type + """""" + p, res = library_add_new_vm + params = {'nic_type': 'bridge'} + r1 = run_api.library_segmentlist(params).json() + for segment in r1['results']: + assert segment['network_type'] == 'public' + params = {'nic_type': 'host'} + r2 = run_api.library_segmentlist(params).json() + for segment in r2['results']: + assert segment['network_type'] == 'hostOnly' + +" +/library/rest/segment_list/,"fetch segment list for library by setting the ""network_type"" parameter","{ +""network_type"" :hostOnly +}",,"def test_library_segmentlist_with_network_type(library_add_new_vm, run_api): + """""" + fetch segmentlist with network type """""" - disks = [{""size"": 20, ""port"": ""hda"", ""type"": ""ide"", ""format"": ""qcow2"", ""is_boot"": False}] - params, response = run_api.library_add_new_vm(disks=disks, noraise=True, is_uefi=True) - test_assert.status(response, 400) + p, r = library_add_new_vm + params = {'network_type': 'hostOnly'} + r = run_api.library_segmentlist(params).json() + for segment in r['results']: + segment['network_type'] == 'hostOnly' " -/library/rest/add,"adding vm to library when machine name contains ""#""","{ 'name': newtxt, 'noraise': True }","{ - ""status"": 401, - ""message"": ""Name cannot contain '/' or '#"" -} -","def test_add_vm_to_library_with_name_contains_hash(run_api): +/library/rest/segment_list/,"fetch segment list for library by setting search parameter to ""host""","{ +search : ""host"" +}",,"def test_library_segmentlist_with_search_param(library_add_new_vm, run_api): """""" - if machine name contains ""#"" + fetch segmentlist with search params """""" - - txt 
= rand_string() - random_index = random.randint(0, len(txt)) - - newtxt = txt[:random_index] + random.choice(['#', '/']) + txt[random_index:] - kwargs = { - 'name': newtxt, - 'noraise': True - } - params, response = run_api.library_add_new_vm(**kwargs) - test_assert.status(response, 400) - rjson = response.json() - assert rjson[""error""] == ""Name cannot contain '/' or '#"", ""The error message is {}"".format(rjson[""error""]) + p, r = library_add_new_vm + params = {'search': 'host'} + r = run_api.library_segmentlist(params).json() + for segment in r['results']: + segment['network_type'] == 'hostOnly' " -/library/rest/add,adding vm to library when multiple bootable cds and same boot order is passed,"cdrom = [{ ""type"": ""sata"", ""iso"": """", ""is_boot"": True, ""boot_order"": 1 }, { ""type"": ""sata"", ""iso"": """", ""is_boot"": True, ""boot_order"": 1 }]","{ +/library/rest/upload_disk/{UUID}/,uploading disk when the disk size does not match,,"{ ""status"" : 400, -""response"" : Bad request -}","def test_add_vm_to_library_multiple_bootable_cds_with_same_boot_order(run_api): +""message"" : ""Disk size mismatch."" +}","def test_library_upload_disk_mismatch_disk_size(library_add_new_vm, run_api): """""" - If multiple bootable cds with same boot order is passed + Mismatch disk size """""" - cdrom = [{ - ""type"": ""sata"", - ""iso"": """", - ""is_boot"": True, - ""boot_order"": 1 - }, - { - ""type"": ""sata"", - ""iso"": """", - ""is_boot"": True, - ""boot_order"": 1 - }] - - params, response = run_api.library_add_new_vm(cdrom=cdrom, noraise=True) - test_assert.status(response, 400) + p, r = library_add_new_vm + lib_id = r['UUID'] + disk_UUID = r['hw']['disks'][0]['UUID'] + r = run_api.library_upload_disk(lib_id, disk_UUID) + test_assert.status(r, 400) + rjson = r.json() + assert re.match(r'Disk size mismatch. 
Uploaded disk size: (\d+), old disk size: (\d+)', rjson['error']), ""json %s"" % rjson " -/library/rest/add,adding vm to library when provided with valid data,,"{ - ""status"": 201, - ""response"": Machine details +/library/rest/upload_disk/{UUID}/,uploading disk when machine UUID provided is invalid,"{ +lib_id = ""invalid"", +disk_UUID = ""invalid"" } -","def test_add_vm_to_library_with_vaild_data(run_api,): +","{ +""status"" : 404, +""message"" : ""Upload Disk: Machine not found"" +}","def test_library_upload_disk_invalid_machine_UUID(run_api): """""" - When provided with valid data + Invalid machine UUID """""" - params, response = run_api.library_add_new_vm() - test_assert.status(response, 201) - UUID = response.json()[""UUID""] - run_api.library_delete(UUID, {}) + lib_id = ""invalid"" + disk_UUID = ""invalid"" + r = run_api.library_upload_disk(lib_id, disk_UUID) + test_assert.status(r, 404) + rjson = r.json() + assert rjson['error'] == 'Upload Disk: Machine not found', ""json %s"" % rjson " -/library/rest/add,adding vm to library when requested with invalid token,,"{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_add_vm_to_library_invaild_token(invalid_exec_api): +/library/rest/upload_disk/{UUID}/,uploading disk when disk_UUID provided is invalid,,"{ +""status"" : 404, +""message"" : ""Upload Disk: Disk not found"" +}","def test_library_upload_disk_invalid_disk_UUID(library_add_new_vm, run_api): """""" - invalid Token + Invalid disk UUID """""" - - params, response = invalid_exec_api.library_add_new_vm(noraise=True) - test_assert.status(response, 401) - rjson = response.json() - assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) + p, r = library_add_new_vm + lib_id = r['UUID'] + disk_UUID = ""invalid"" + r = run_api.library_upload_disk(lib_id, disk_UUID) + test_assert.status(r, 404) + rjson = r.json() + assert rjson['error'] == 'Upload Disk: Disk not found', ""json %s"" % rjson " 
-/library/rest/add,adding vm to library without Authorization,,"{ +/library/rest/viewmachinelist/,"getting the list of machines where ""scope"" param is set to public","params = { + ""scope"": 'public' + }","{ +""status"" : 200, +""response"" : Machine Details list +}","def test_library_viewmachinelist_with_public_scope(run_api): + """""" + provide ""scope"" as public + """""" + params = { + ""scope"": 'public' + } + r = run_api.library_viewmachinelist(params) + test_assert.status(r, 200) +" +/library/rest/viewmachinelist/,getting the list of machines when requested without Authorization,,"{ ""status"" : 401, ""message"" : ""Authentication credentials were not provided"" -}","def test_add_vm_to_library_without_authorization(anonymous_exec_api): +}","def test_library_viewmachinelist_without_authorization(anonymous_exec_api): """""" without authorization """""" - - params, response = anonymous_exec_api.library_add_new_vm(noraise=True) - test_assert.status(response, 401) - rjson = response.json() + r = anonymous_exec_api.library_viewmachinelist() + test_assert.status(r, 401) + rjson = r.json() assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) " -/library/rest/adddisk/{{UUID}}/ ,adding disk to library if tried to add a disk with same boot order as existing disk,"params = { - 'boot_order': 1 +/library/rest/viewmachinelist/,getting the list of machines when requested with params- disk_size_min and disk_size_max,"params = { + 'disk_size_min': 0, + 'disk_size_max': 10000 }","{ -""status"" : 400, -""response"" : Bad Request -}","def test_lib_add_disk_with_same_boot_order(run_api, library_add_new_vm): - lib_params, r = library_add_new_vm - lib_UUID = r[""UUID""] +""status"" : 200, +""response"" : Machine Details list +}","def test_library_viewmachinelist_with_min_and_max_disk(run_api): + """""" + provide disk_size_min and disk_size_max params + """""" params = { - 'boot_order': 1 + 
'disk_size_min': 0, + 'disk_size_max': 10000 } - r = run_api.library_add_disk(lib_UUID, params) - test_assert.status(r, 400) + r = run_api.library_viewmachinelist(params) + test_assert.status(r, 200) " -/library/rest/adddisk/{{UUID}}/ ,adding disk to library if tried to add IDE type disks for UEFI enabled library,"params = { - 'type': 'ide', - 'port': 'hdc' - }","{ -""status"" : 400 -}","def test_lib_add_disk_with_uefi_enabled(run_api, ): - lib_params, r = run_api.library_add_new_vm(noraise=True, is_uefi=True) - lib_UUID = r.json()[""UUID""] +/library/rest/viewmachinelist/,getting the list of machines when requested with params - ram_min and ram_max,"params = { 'ram_min': 0, 'ram_max': 10000 }","{ +""status"" : 200, +""response"" : Machine Details list +}","def test_library_viewmachinelist_with_min_and_max_ram(run_api): + """""" + provide ram_min and ram_max params + """""" params = { - 'type': 'ide', - 'port': 'hdc' + 'ram_min': 0, + 'ram_max': 10000 } - r = run_api.library_add_disk(lib_UUID, params) - test_assert.status(r, 400) - run_api.library_delete(lib_UUID, lib_params) + r = run_api.library_viewmachinelist(params) + test_assert.status(r, 200) " -/library/rest/adddisk/{{UUID}}/ ,adding disk to library When provided correct UUID and correct data,,"{ -""status"" : 201, -""response"" : Disks should be added to lib -}"," -PARAMETERS = [{""dest_obj"": OBJ_LIB}] - -@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) -def test_lib_add_disk_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): +/library/rest/viewmachinelist/,getting the list of machines when requested with param - page_size and page no,"params = { + 'page_size': 1, + 'page_no': 1 + }","{ +""status"" : 200, +""response"" : Machine Details list +}","def test_library_viewmachinelist_with_page_size_and_page_no(run_api): """""" - adding disk to a library by Admin + provide page_size and page_no """""" - # Admin check for adding disk to a library created 
by different user. - lib_id = custom_lib_non_admin_operations - r = run_api.library_add_disk(lib_id) - test_assert.status(r, 201) + params = { + 'page_size': 1, + 'page_no': 1 + } + r = run_api.library_viewmachinelist(params) + test_assert.status(r, 200) " -/library/rest/adddisk/{{UUID}}/ ,adding disk to library when requested with invalid token,"{ -lib_id = ""doesnotexits"" -}","{ +/library/rest/viewmachinelist/,getting the list of machines when requested with invalid token,,"{ ""status"" : 401, ""message"" : ""Invalid token"" -}","def test_lib_add_disk_with_invalid_token(invalid_exec_api): +}","def test_library_viewmachinelist_with_invalid_token(invalid_exec_api): """""" with invalid token """""" - lib_id = ""doesnotexits"" - r = invalid_exec_api.library_add_disk(lib_id) + r = invalid_exec_api.library_viewmachinelist() test_assert.status(r, 401) rjson = r.json() assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) " -/library/rest/adddisk/{{UUID}}/ ,adding disk to library without Authorization,"{ -lib_id = ""doesnotexits"" -}","{ -""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -}","def test_lib_add_disk_without_authorization(anonymous_exec_api): +/library/rest/viewmachinelist/,getting the list of machines when requested using the search parameter.,params = { 'search': 'machine' },"{ +""status"" : 200, +""response"" : Machine Details list +}","def test_library_viewmachinelist_search_parameter(run_api): """""" - without authorization + provide search parameter """""" - lib_id = ""doesnotexits"" - r = anonymous_exec_api.library_add_disk(lib_id) - test_assert.status(r, 401) - rjson = r.json() - assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) + params = { + 'search': 'machine' + } + r = run_api.library_viewmachinelist(params) + test_assert.status(r, 200) " -/library/rest/boot types/,getting boot type 
list when Requested,,"{ +/library/rest/viewmachinelist/,getting the list of machines when requested,,"{ +""status"" : 200, +""response"" : Machine Details list +}","def test_library_viewmachinelist(run_api): + """""" + getting the list of machines + """""" + r = run_api.library_viewmachinelist() + test_assert.status(r, 200) +" +/profile/rest/get/,"fetching list of profiles. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ ""status"" : 200, -""response"" : Boot type list -}","def test_library_boottypes(run_api): +""response"" : list of profiles +}","def test_profile_list(run_api, profile_list): """""" - Getting the list of Boot type + Fetch list of all profiles """""" - r = run_api.library_boottypes() - result = r.json() - test_assert.status(result, LIBRARY_BOOT_TYPE, ""library_boottypes"") - test_assert.status(r, 200) + r = profile_list + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 200) " -/library/rest/boottypes/,getting boot type list when requested with invalid token,,"{ +/profile/rest/get/,fetching list of profiles without authorization,,"{ ""status"" : 401, -""message"" : ""Invalid token"" -}","def test_library_boottypes_with_invalid_token(invalid_exec_api): +""message"" : ""Authentication credentials were not provided."" +}","def test_profile_list_without_authorization(anonymous_exec_api): """""" - Getting the list of Boot type when invalid token provided + Fetch list of all profiles without authorization """""" - r = invalid_exec_api.library_boottypes() + r = anonymous_exec_api.profile_list() + res = r.json() test_assert.status(r, 401) - rjson = r.json() - assert rjson['detail'] == ""Invalid token."", ""|> The Error is {}"".format(rjson['detail']) + assert res['detail'] == ""Authentication credentials were not provided."" " 
-/library/rest/bulkdelete/,deployment of deletion of machines in bulk when empty list of UUIDs is passed ,"machine = { ""machine_list"": [] }","{ -""status"" : 400 -}","def test_library_bulk_delete_with_empty_list(run_api): +/profile/rest/get/,fetching list of profiles using invalid token,,"{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_profile_list_with_invalid_token(invalid_exec_api): """""" - When empty list is passed + Fetch list of all profiles with invalid token """""" - machine = { - ""machine_list"": [] - } - res = run_api.library_bulkdelete(machine) - - test_assert.status(res, 400) - rjson = res.json() - assert rjson['error'] == ""machine_list cannot be null or empty"", ""|> Json %s"" % rjson + r = invalid_exec_api.profile_list() + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Invalid token."" " -/library/rest/bulkdelete/,deployment of deletion of machines in bulk when passed a list UUIDs of all deletable machines,,"{ -""status"" : 204, -""response"" : ""Machine deleted successfully"" -}","def test_library_bulk_delete(library_bulkdelete): +/profile/rest/self/,fetching details of self profile without authorization,,"{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided."" +}"," +def test_profile_self_without_authorization(anonymous_exec_api): """""" - Deleting multiple VM's + Fetching details of self profile without authorization """""" - params, r = library_bulkdelete - test_assert.status(r, 204) + r = anonymous_exec_api.profile_self() + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Authentication credentials were not provided."" " -/library/rest/bulkdelete/,deployment of deletion of machines in bulk when requested with invalid token,"machine = { ""machine_list"": [] }","{ +/profile/rest/self/,fetching details of self profile with invalid token,,"{ ""status"" : 401, ""message"" : ""Invalid token"" -}","def 
test_library_bulk_delete_with_invalid_token(invalid_exec_api): +}","def test_profile_self_with_invalid_token(invalid_exec_api): """""" - Invalid token + Fetching details of self profile with invalid token """""" - machine = { - ""machine_list"": [] - } - res = invalid_exec_api.library_bulkdelete(machine) - - test_assert.status(res, 401) - rjson = res.json() - assert rjson['detail'] == ""Invalid token."", ""|> The Error is {}"".format(rjson['detail']) + r = invalid_exec_api.profile_self() + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Invalid token."" " -/library/rest/bulkdelete/,deployment of deletion of machines in bulk when requested with invalid token,"machine = { ""machine_list"": ['invalid'] }","{ -""status"" : 400, -""message"" : Machine matching query does not exist."" -}","def test_library_bulk_delete_invalid_id(run_api): +/profile/rest/self/,fetching details of self profile,,"{ +""status"" : 200, +""response"" : Self profile details +}","def test_profile_self(run_api, profile_self): """""" - provide invalid machine id + Fetching details of self profile """""" - machine = { - ""machine_list"": ['invalid'] + r = profile_self + res = r.json() + assert res['username'] == run_api.user + test_assert.status(r, 200) +" +/profile/rest/set_group/{user_id}/,setting group to profile without authorization,"{ +groups = { + ""add"": [], + ""remove"": ""valid_group_name"" +}, +user_id = id +} ","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided."" +}","def test_profile_set_group_without_authorization(anonymous_exec_api): + """""" + Set group to profile without authorization + """""" + groups = { + ""add"": [], + ""remove"": [""valid-group-name""] } - res = run_api.library_bulkdelete(machine) - - test_assert.status(res, 400) - rjson = res.json() - assert rjson['failure'][0]['error'] == ""Machine matching query does not exist."", ""|> The Error is {}"".format(rjson) + r = 
anonymous_exec_api.profile_set_group(user_id=id, params=groups) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == 'Authentication credentials were not provided.' " -/library/rest/bulkdelete/,deployment of deletion of machines in bulk without Authorization,"machine = { ""machine_list"": [] }","{ +/profile/rest/set_group/{user_id}/,setting group to profile using invalid token,"{ +groups = { + ""add"": [], + ""remove"": ""valid_group_name"" +}, +user_id = id +} ","{ ""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -}","def test_library_bulk_delete_without_authorization(anonymous_exec_api): +""message"" : ""Invalid token"" +}","def test_profile_set_group_with_invalid_token(invalid_exec_api): """""" - without authorization + Set group to profile with invalid token """""" - machine = { - ""machine_list"": [] + groups = { + ""add"": [], + ""remove"": [""valid-group-name""] } - res = anonymous_exec_api.library_bulkdelete(machine) - - test_assert.status(res, 401) - rjson = res.json() - assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The Error is {}"".format(rjson['detail']) + r = invalid_exec_api.profile_set_group(user_id = id, params = groups) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == 'Invalid token.' " -/library/rest/clone/{{UUID}}/,"cloning a library when provided with valid data. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +/profile/rest/set_group/{user_id}/,"setting group to profile for valid User ID and valid group names.Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
",,"{ -""status"" : 200, -""response"" : Data of newly cloned machine -}","endpoint = ""lib_clone"" -PARAMETERS = [{""dest_obj"": OBJ_LIB}] - - -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) -@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) -def test_library_clone(library_clone_vm, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): +""status"" : 201, +""response"" : success +}","def test_profile_set_group(profile_set_group, run_api): """""" - Cloning VM + Set group to profile """""" - template, r = library_clone_vm - result = r.json() - test_assert.status(template, result, ""library_clone"") - test_assert.status(r, 200) - - # Adding non_admin check to Clone a Library Image created by different user - if run_api.user_type == USER_TYPE[""non_admin""]: - lib_id = custom_lib_admin_operations - param, r = run_api.library_clone_vm(lib_id) + r = profile_set_group + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: test_assert.status(r, 403) - - if run_api.user_type == USER_TYPE[""manager""]: - # When the user is not part of the group that the manager manages - lib_id = custom_lib_admin_operations - param, r = run_api.library_clone_vm(lib_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False)) - - # When the user is part of the group that the manager manages - lib_id = custom_lib_non_admin_operations - param, r = run_api.library_clone_vm(lib_id) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True)) - param[""UUID""] = lib_id - clone_UUID = r.json()['UUID'] - run_api.library_delete(clone_UUID, param)" -/library/rest/clone/{{UUID}}/,cloning library when clone name contains #,"clone = { - ""mac_list"": [], - ""name"": cl_name, - ""description"": ""This is test description for %s"" % cl_name, - }","{ -""status"" : 400, -""message"" : ""Name cannot contain '/' or '#'"" -}","def 
test_library_clone_name_contains_hash(library_add_new_vm, run_api): - """""" - When clone name contains # - """""" - txt = rand_string() - random_index = random.randint(0, len(txt)) - - newtxt = txt[:random_index] + random.choice(['#', '/']) + txt[random_index:] - cl_name = f""{newtxt}_cl"" - clone = { - ""mac_list"": [], - ""name"": cl_name, - ""description"": ""This is test description for %s"" % cl_name, - } - params, r = library_add_new_vm - UUID = r.get('UUID', 'doesnotexits') - clone_params, clone_r = run_api.library_clone_vm(UUID, clone) - test_assert.status(clone_r, 400) - rjson = clone_r.json() - assert rjson['error'] == ""Name cannot contain '/' or '#"", ""|> The error is {}"".format(rjson['error']) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 201) " -/library/rest/clone/{{UUID}}/,cloning library when clone name is empty,"clone = { - ""mac_list"": [], - ""name"": """", - ""description"": ""This is test description for %s"", - } +/profile/rest/set_group/{user_id}/,"setting group to profile for invalid ID. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
","{ +groups = { + ""add"": [], + ""remove"": ""valid_group_name"" +}, +user_id = 0 +} ","{ ""status"" : 400, -""message"" : ""Please provide clone name"" -}","def test_library_clone_with_empty_name(library_add_new_vm, run_api): +""response"" : failure +}","def test_profile_set_group_invalid_user_id(run_api): """""" - Empty name + Set group to profile by invalid user id """""" - clone = { - ""mac_list"": [], - ""name"": """", - ""description"": ""This is test description for %s"", + groups = { + ""add"": [], + ""remove"": [random.choice(GROUPS)] } - params, r = library_add_new_vm - UUID = r['UUID'] - clone_params, clone_r = run_api.library_clone_vm(UUID, clone) - test_assert.status(clone_r, 400) - rjson = clone_r.json() - assert rjson['error'] == ""Please provide clone name"", ""|> The Error is {}"".format(rjson) + r = run_api.profile_set_group(user_id=0, params=groups) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 400) " -/library/rest/clone/{{UUID}}/,cloning library when duplicate mac provided,"networks = [ - { - ""type"": ""bridge"", - ""model"": ""virtio"", - ""segment"": ""Default Public Segment"", - ""mac"": generate_mac_address() - } - ]","{ +/profile/rest/set_group/{user_id}/,"setting group to profile for invalid group names.Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ +groups = { + ""add"": [], + ""remove"": [invalid_group_name] +}, +user_id = id +} ","{ ""status"" : 400, -""message"" : ""Mac is already present"" -}","@pytest.mark.skip(""Return 400 but create a clone of vm"") -def test_library_clone_duplicate_mac(run_api): +""message"" : ""Group matching query does not exist"" +}","def test_profile_set_group_invalid_group_name(run_api, admin_exec_api): """""" - library clone with duplicate mac provided + Set group to profile by invalid group name """""" - networks = [ - { - ""type"": ""bridge"", - ""model"": ""virtio"", - ""segment"": ""Default Public Segment"", - ""mac"": generate_mac_address() - } - ] - params, r = run_api.library_add_new_vm(networks=networks) - rjson = r.json() - mac = rjson['hw']['networks'][-1]['mac'] - name = rjson['name'] - cl_name = rand_string() - clone = { - ""mac_list"": [mac,], - ""name"": cl_name, - ""description"": ""This is test description for %s"" % cl_name, + groups = { + ""add"": [], + ""remove"": [""invalid-group-name""] } - UUID = rjson['UUID'] - clone_params, clone_r = run_api.library_clone_vm(UUID, clone) - test_assert.status(clone_r, 400) - clone_rjson = clone_r.json() - assert clone_rjson['error'] == ""Mac is already present in %s"" % name, ""|> The Error is {}"".format(clone_rjson) - run_api.library_delete(UUID) + r = admin_exec_api.profile_list() + res = r.json() + profile = random.choice(res) + id = profile['id'] + r = run_api.profile_set_group(user_id=id, params=groups) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + rjson = r.json() + test_assert.status(r, 400) + assert rjson['error'] == ""Group matching query does not exist."" " -/library/rest/clone/{{UUID}}/,cloning library when Provided with machine UUID that does not exist,"{ -UUID = 'doesnotexits' +/rtask/rest/children/{UUID}/,fetching the list of children jobs without authorization,"{ +uuid = ""valid_uuid"" 
}","{ -""status"" : 404, -""message"" : ""Clone : Machine not found"" -}","def test_library_clone_with_wrong_machine_UUID(library_add_new_vm, run_api): +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_rtask_list_children_without_authorization(anonymous_exec_api): """""" - when Provided machine UUID does not exist + Fetching the List of childrens of a job without authorization """""" - UUID = 'doesnotexits' - clone_params, clone_r = run_api.library_clone_vm(UUID) - test_assert.status(clone_r, 404) - rjson = clone_r.json() - assert rjson['error'] == ""Clone: Machine not found"", ""|> The error message is {}"".format(rjson['error']) + r = anonymous_exec_api.rtask_list_children(""invalid-uuid"") + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Authentication credentials were not provided."" " -/library/rest/clone/{{UUID}}/,cloning library when requested with invalid token,"{ -UUID = 'doesnotexits' +/rtask/rest/children/{UUID}/,fetching the list of children jobs when requested with invalid UUID,"{ +uuid = ""invalid_uuid"" }","{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_library_clone_invaild_token(invalid_exec_api): +""status"" : 400 / 404, +""response"" : Bad request +}","def test_rtask_list_children_invalid_uuid(run_api): """""" - clone request with invalid token + Fetching the List of childrens of a job having invalid uuid """""" - - UUID = 'doesnotexits' - clone_params, clone_r = invalid_exec_api.library_clone_vm(UUID) - test_assert.status(clone_r, 401) - rjson = clone_r.json() - assert rjson['detail'] == ""Invalid token."", ""|> The Error is {}"".format(rjson['detail']) + r = run_api.rtask_list_children(""invalid-uuid"") + status_code = r.status_code + assert status_code in [400, 404] " -/library/rest/clone/{{UUID}}/,cloning library without Authorization,"{ -UUID = 'doesnotexits' +/rtask/rest/children/{UUID}/,fetching the list of children jobs when requested with invalid 
token,"{ +uuid = ""valid_uuid"" }","{ ""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -}","def test_library_clone_without_authorization(anonymous_exec_api): +""message"" : ""Invalid token"" +}","def test_rtask_list_children_with_invalid_token(invalid_exec_api): """""" - clone without authorization + Fetching the List of childrens of a job with invalid token """""" - - UUID = 'doesnotexits' - clone_params, clone_r = anonymous_exec_api.library_clone_vm(UUID) - test_assert.status(clone_r, 401) - rjson = clone_r.json() - assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The Error is {}"".format(rjson['detail']) + r = invalid_exec_api.rtask_list_children(""invalid-uuid"") + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Invalid token."" " -/library/rest/ctypes/,getting the console type when requested without Authorization,,"{ +/rtask/rest/children/{UUID}/,"fetching the list of children jobs using valid data +",,"{ ""status"" : 200, -""response"" : console type details displayed -}","def test_library_ctypes_without_authorization(anonymous_exec_api): +""response"" : Children Task listed +}","def test_rtask_list_children(rtask_list_children): """""" - without authorization + Fetching the List of children of a island deploy job """""" - r = anonymous_exec_api.library_console_types() - result = r.json() - test_assert.status(result, LIBRARY_CONSOLE_TYPE, ""library_ctypes"") + params, r = rtask_list_children + rjson = r.json() test_assert.status(r, 200) + assert rjson[""count""] == len(params[""machines""][""add""]) + assert rjson[""results""][0][""type_name""] == ""Deploy"" " -/library/rest/delete/{UUID}/,deleting a library by admin when provided with valid UUID,,"{ -""status"" : 204 -}","PARAMETERS = [{""dest_obj"": OBJ_LIB}] - -@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) -def test_lib_delete_admin(skip_if_not_admin, 
custom_lib_non_admin_operations, run_api): - """""" - Deleting the Library by Admin - """""" - # Admin check for deleting the Library created by different user. - lib_id = custom_lib_non_admin_operations - r = run_api.library_delete(lib_id, {}) - test_assert.status(r, 204) - -" -/library/rest/delete/{UUID}/,deleting a library by manager when provided with valid UUID,,,"endpoint = ""lib_delete"" - -PARAMETERS = [{""dest_obj"": OBJ_LIB}] - - -@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) -def test_lib_delete_manager(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): - """""" - Delete the Library by Manager - """""" - # When the user is not part of the group that the manager manages - lib_id = custom_lib_admin_operations - r = run_api.library_delete(lib_id, {}) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False)) - - # When the user is part of the group that the manager manages - lib_id = custom_lib_non_admin_operations - r = run_api.library_delete(lib_id, {}) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True)) -" -/library/rest/delete/{UUID}/,deleting a library when machine UUID does not exist,"{ - lib_id = ""invalid"" - +/rtask/rest/delete/{UUID}/,deleting task without authorization,"{ +uuid = ""valid_uuid"" }","{ -""status"" : 404, -""message"" : Machine DoesNotExist -}","def test_lib_delete_with_invalid_UUID(run_api): +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_rtask_delete_without_authorization(anonymous_exec_api): """""" - When machine UUID does not exist + Deleting the task without authorization """""" - lib_id = ""invalid"" - ret = run_api.library_delete(lib_id) - test_assert.status(ret, 404) + r = anonymous_exec_api.rtask_delete(""valid-uuid"") + res = r.json() + test_assert.status(r, 
401) + assert res['detail'] == ""Authentication credentials were not provided."" " -/library/rest/delete/{UUID}/,deleting a library when requested with invalid token,"{ -lib_id = 'wrong' +/rtask/rest/delete/{UUID}/,deleting task when requested with invalid token,"{ +uuid = ""valid_uuid"" }","{ ""status"" : 401, ""message"" : ""Invalid token"" -}","def test_lib_delete_with_invalid_token(invalid_exec_api): +}","def test_rtask_delete_with_invalid_token(invalid_exec_api): """""" - without authorization + Deleting the task with invalid token """""" - lib_id = 'wrong' - ret = invalid_exec_api.library_delete(lib_id) - test_assert.status(ret, 401) - rjson = ret.json() - assert rjson['detail'] == ""Invalid token."", ""|> The Error is {}"".format(rjson['detail']) + r = invalid_exec_api.rtask_delete(""valid-uuid"") + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Invalid token."" " -/library/rest/delete/{UUID}/,deleting a library when UUID exists and it has next revision/ deployment exists,,"{ -""status"" : 400, -""response"" : Bad Request -}","def test_lib_delete_with_deployment_exists(run_api, library_add_new_vm): +/rtask/rest/delete/{UUID}/,"deleting task of valid UUID.Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+",,"{ +""status"" : 403 / 204, +""response"" : Task deleted successfully +}","def test_rtask_delete(run_api, rtask_delete): """""" - When UUID exists and it has next revision/ deployment exists + Deleting the task """""" - params, r = library_add_new_vm - lib_id = r[""UUID""] - deploy = run_api.deploy_image(lib_id) - r = run_api.library_delete(lib_id, {}) - test_assert.status(r, 400) - deployjson = deploy.json() - run_api.deploy_image_delete(deployjson['UUID'], {}) + r = rtask_delete + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 204) +" +/rtask/rest/delete/{UUID}/,"deleting task of invalid UUID. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ +uuid = ""invalid_uuid"" +}","{ +""status"" : 400 / 404, +""response"" : Bad request +}","def test_rtask_delete_invalid_uuid(run_api): + """""" + Deleting the task with invalid token + """""" + r = run_api.rtask_delete(""invalid-uuid"") + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + status_code = r.status_code + assert status_code in [404, 400] " -/library/rest/delete/{UUID}/,deleting a library without Authorization,"{ -lib_id = 'wrong' +/rtask/rest/detail/{UUID}/,getting details of task without authorization,"{ +uuid = ""valid_uuid"" }","{ ""status"" : 401, ""message"" : ""Authentication credentials were not provided"" -}","def test_lib_delete_without_authorization(anonymous_exec_api): +}","def test_rtask_details_without_authorization(anonymous_exec_api): """""" - without authorization + Getting details of Task without authorization """""" - lib_id = 'wrong' - ret = anonymous_exec_api.library_delete(lib_id) - test_assert.status(ret, 401) - rjson = ret.json() - assert rjson['detail'] == ""Authentication 
credentials were not provided."", ""|> The Error is {}"".format(rjson['detail']) + r = anonymous_exec_api.rtask_details(""valid-uuid"") + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Authentication credentials were not provided."" " -/library/rest/details/{UUID}/ ,getting library details ,,"{ +/rtask/rest/detail/{UUID}/,getting details of task with invalid token,"{ +uuid = ""valid_uuid"" +}","{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_rtask_details_with_invalid_token(invalid_exec_api): + """""" + Getting details of Task with invalid token + """""" + r = invalid_exec_api.rtask_details(""valid-uuid"") + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Invalid token."" +" +/rtask/rest/detail/{UUID}/,getting details of task for valid uuid,,"{ ""status"" : 200, -""response"" : Library details displayed -}","def test_lib_details(library_details): +""response"" : Details provided +}","def test_rtask_details(rtask_details): """""" - Getting the Library details + Getting details of Specific Task """""" - x, r = library_details + params, r = rtask_details + res = r.json() + test_assert.status(res, params, ""rtask_details"", ""server"") test_assert.status(r, 200) " -/library/rest/details/{UUID}/ ,getting library details requested by a non-admin user,,"{ -""status"" : 403 -}"," -PARAMETERS = [{""dest_obj"": OBJ_LIB}] - -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) -def test_lib_details_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): +/rtask/rest/detail/{UUID}/,getting details of task for invalid uuid,"{ +uuid = ""invalid_uuid"" +}","{ +""status"" : 400, +""response"" : Bad request +}","@pytest.mark.skip(reason=""Skipping this test because it is returning 404 in place of 400"") +def test_rtask_details_invalid_uuid(run_api): """""" - Details of the Library by non-Admin + Getting details of Task by providing invalid uuid """""" - # Non-admin 
check for fetching details of the Library created by different user. - lib_id = custom_lib_admin_operations - r = run_api.library_details(lib_id, {}) - test_assert.status(r, 403) - + r = run_api.rtask_details(""invalid-uuid"") + # res = r.json() + test_assert.status(r, 400) " -/library/rest/details/{UUID}/ ,getting library details requested by an admin user,,"{ +/rtask/rest/list/,fetching the list of jobs with added filters,,"{ ""status"" : 200, -""response"" : Library details displayed -}","PARAMETERS = [{""dest_obj"": OBJ_LIB}] +""response"" : listed jobs +}","PARAMETERS = [ + {""page"": 1, ""page_size"": 5}, + {""search"": ""finished""}, + {""ordering"": ""mtime""}, + {""ordering"": ""-mtime""}, + {""ordering"": ""status""}, + {""ordering"": ""-status""}, + {""ordering"": ""job_type""}, + {""ordering"": ""-job_type""} +] -@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) -def test_lib_details_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): +@pytest.mark.parametrize(""filter"", PARAMETERS) +def test_rtask_list_with_filter(run_api, filter): """""" - Details of the Library by Admin + Fetching the List of Jobs based on filter """""" - # Admin check for fetching details of the Library created by different user. 
- lib_id = custom_lib_non_admin_operations - r = run_api.library_details(lib_id, {}) + r = run_api.rtask_list(filter) test_assert.status(r, 200) " -/library/rest/details/{UUID}/ ,getting library details when provided with invalid UUID,"{ - UUID = 'invalid' -}","{ -""status"" : 200, -""message"" : ""Machine DoesNotExist"" -}","def test_lib_details_with_invalid_UUID(run_api): - """""" - when provided invalid UUID - """""" - UUID = 'invalid' - r = run_api.library_details(UUID, {}) - test_assert.status(r, 404) - rjson = r.json() - assert rjson['error'] == ""Machine Details: Machine not found"", ""|> The error message is %s"" % (rjson['error']) -" -/library/rest/details/{UUID}/ ,getting library details when provided without Authorization,"{ - UUID = 'valid_UUID' -}","{ +/rtask/rest/list/,fetching the list of jobs when requested with invalid token,,"{ ""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -}"," -def test_lib_details_without_authorization(anonymous_exec_api): +""message"" : ""Invalid token"" +}","def test_rtask_list_with_invalid_token(invalid_exec_api): """""" - without authorization + Fetching the List of Jobs with invalid token """""" - UUID = 'invalid' - r = anonymous_exec_api.library_details(UUID, {}) + r = invalid_exec_api.rtask_list() + res = r.json() test_assert.status(r, 401) - rjson = r.json() - assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The Error is {}"".format(rjson['detail']) + assert res['detail'] == ""Invalid token."" " -/library/rest/details/{UUID}/ ,getting library details when requested with invalid token,"{ - UUID = 'valid_UUID' -}","{ +/rtask/rest/list/,fetching the list of jobs without authorization,,"{ ""status"" : 401, -""message"" : ""Invalid token"" -}","def test_lib_details_with_invalid_token(invalid_exec_api): +""message"" : ""Authentication credentials were not provided"" +}","def test_rtask_list_without_authorization(anonymous_exec_api): """""" - invalid token + 
Fetching the List of Jobs without authorization """""" - UUID = 'invalid' - r = invalid_exec_api.library_details(UUID, {}) + r = anonymous_exec_api.rtask_list() + res = r.json() test_assert.status(r, 401) - rjson = r.json() - assert rjson['detail'] == ""Invalid token."", ""|> The Error is {}"".format(rjson['detail']) + assert res['detail'] == ""Authentication credentials were not provided."" " -/library/rest/dformattypes/,getting the details of DiskFormat Type,,"{ +/rtask/rest/list/,"fetching the list of jobs when requested with ordering param as ""status""",,"{ ""status"" : 200, -""message"" : DiskFormat type list -}","def test_library_dformattypes(run_api): +""response"" : listed jobs in ascending order +}","def test_rtask_list_status(rtask_list_status): """""" - Getting the list of disk format types + Listing the status of rtasks """""" - r = run_api.library_disk_format_type() - result = r.json() - test_assert.status(result, LIBRARY_DISK_FORMAT_TYPE, ""library_dformattypes"") + params, r = rtask_list_status test_assert.status(r, 200) " -/library/rest/dformattypes/,getting the details of DiskFormat Type when requested with invalid token,,"{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_library_dformattypes_with_invalid_token(invalid_exec_api): +/rtask/rest/list/,fetching the list of jobs,,"{ +""status"" : 200, +""response"" : listed jobs +}","def test_rtask_list(rtask_list): """""" - Getting the list of disk format types + Fetching the List of Jobs """""" - r = invalid_exec_api.library_disk_format_type() - test_assert.status(r, 401) - rjson = r.json() - assert rjson['detail'] == ""Invalid token."", ""|> The Error is {}"".format(rjson['detail']) + r = rtask_list + test_assert.status(r, 200) " -/library/rest/dformattypes/,getting the details of DiskFormat Type without Authorization,,"{ +​/rtask​/rest​/rlist​/,fetching the list of remote tasks with filters,,"{ ""status"" : 200, -""message"" : DiskFormat type list -}","def 
test_library_dformattypes_without_authorization(anonymous_exec_api): +""response"" : Remote Task listed +}","PARAMETERS = [ + {""page"": 1, ""page_size"": 5}, + {""search"": ""finished""}, + {""ordering"": ""mtime""}, + {""ordering"": ""-mtime""}, + {""ordering"": ""status""}, + {""ordering"": ""-status""}, + {""ordering"": ""job_type""}, + {""ordering"": ""-job_type""} +] + +@pytest.mark.parametrize(""filter"", PARAMETERS) +def test_rtask_rlist_with_filter(run_api, filter): """""" - Getting the list of disk format types + Fetching the List of Jobs based on filter """""" - r = anonymous_exec_api.library_disk_format_type() - result = r.json() - test_assert.status(result, LIBRARY_DISK_FORMAT_TYPE, ""library_dformattypes"") + r = run_api.rtask_rlist(filter) test_assert.status(r, 200) " -/library/rest/dtypes/,getting DiskBus Type list When Requested,,"{ +​/rtask​/rest​/rlist​/,fetching the list of remote tasks with customized filters,,"{ ""status"" : 200, -""message"" : DiskBus type list -}","def test_library_dtypes(run_api): +""response"" : Filtered remote task listed +}","@pytest.mark.skip(reason=""cannot validate the remote tasks"") +def test_rtask_rlist_filter(run_api): """""" - Getting the list of disk type + Fetching the List of Jobs by adding filters """""" - r = run_api.library_disk_type() - result = r.json() - test_assert.status(result, LIBRARY_DISK_TYPE, ""library_boottypes"") - test_assert.status(r, 200) + servers = [server[""hostname""] for server in run_api.server_list().json()[""results""]] + random_server = randint(0, 2) + owner_filter = {""user"": choice([1, 2, 3])} + task_for_filter = {""task_for"": servers[random_server]} + task_on_filter = {""task_on"": servers[random_server]} + status_filter = {""status"": choice(['created', 'delegated', 'started', 'finished', + 'failed', 'cancel', 'cancelling', 'cancelled'])} + search_filter = {""search"": choice([""Refresh"", ""BuildISOList"", ""DeleteRepoStoreFiles"", + DEFAULT_ADMIN_ACCOUNT['user'], 
DEFAULT_NON_ADMIN_ACCOUNT['user'], DEFAULT_MANAGER_ACCOUNT['user'], ""main"", ""mh"", ""mh-2""])} + filters = [owner_filter, task_for_filter, task_on_filter, status_filter, search_filter] + for filter in range(len(filters)): + r = run_api.rtask_rlist(filters[filter]) + test_assert.status(r, 200) " -/library/rest/dtypes/,getting DiskBus Type list when requested with invalid token,,"{ +​/rtask​/rest​/rlist​/,fetching the list of remote tasks when requested with invalid token,,"{ ""status"" : 401, ""message"" : ""Invalid token"" -}","def test_library_dtypes_with_invalid_token(invalid_exec_api): +}","def test_rtask_rlist_with_invalid_token(invalid_exec_api): """""" - Getting the list of disk type + Fetching the List of Jobs with invalid token """""" - r = invalid_exec_api.library_disk_type() - result = r.json() + r = invalid_exec_api.rtask_rlist() + res = r.json() test_assert.status(r, 401) - assert result['detail'] == ""Invalid token."", ""|> The Error is {}"".format(result['detail']) + assert res['detail'] == ""Invalid token."" +" +​/rtask​/rest​/rlist​/,fetching the list of remote jobs / tasks using valid data,,"{ +""status"" : 200, +""response"" : Remote Task listed +}","def test_rtask_rlist(rtask_rlist): + """""" + Fetching the List of Jobs + """""" + r = rtask_rlist + test_assert.status(r, 200) +" +/server/rest/backup_complete/,"creating a backup complete token for the server using invalid token. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ + 'token': 'invalid' +}","{ + ""status"" : 400, + ""message"" : ""Invalid token"" +}","def test_server_backup_complete_with_invalid_token(run_api): + """""" + testing server backup_complete using invalid token + """""" + params = { + 'token': 'invalid' + } + r = run_api.server_backup_complete(params) + rjson = r.json() + if run_api.user_type == 'admin': + test_assert.status(r, 400) + assert rjson['result'] == 'FAILURE', rjson + assert rjson['error'] == 'Invalid Token', rjson + else: + test_assert.status(r, 403) + assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson " -/library/rest/dtypes/,getting DiskBus Type list when requested without authorization,,"{ -""status"" : 200, -""message"" : DiskBus type list -}","def test_library_dtypes_without_authorization(anonymous_exec_api): +/server/rest/backup_manifest/,"back-up manifest api operation of server using invalid token. check the user type before performing the operation, only admin user type have the permission to perform such operations. + ","{ +""token"" :""invalid"" +}","{ + ""status"" : 400, + ""message"" : ""Token Invalid"" +}","def test_server_backup_manifest_invalid_token(run_api): """""" - Getting the list of disk type without authorization + testing backup-manifest api using invalid token """""" - r = anonymous_exec_api.library_disk_type() - result = r.json() - test_assert.status(result, LIBRARY_DISK_TYPE, ""library_boottypes"") - test_assert.status(r, 200) + params = {""token"": ""invalid""} + r = run_api.server_backup_manifest(params) + rjson = r.json() + if run_api.user_type == 'admin': + test_assert.status(r, 400) + assert rjson['reason'] == 'Invalid Token', ""|> Json %s"" % rjson + else: + test_assert.status(r, 403) + assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson " -/library/rest/edit/{UUID}/,"edition of details when UUID exists and it doesn't have next revision. 
Check the user type before performing the operation. -",,"{ -""status"" : 201, -""response"" : Details updated -}"," -endpoint = ""lib_edit"" -PARAMETERS = [{""dest_obj"": OBJ_LIB}] - - -@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) -@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) -def test_library_edit(run_api, custom_lib_admin_operations, custom_lib_non_admin_operations): +/server/rest/backup_manifest/,"back-up manifest api operation of server using empty string token. check the user type before performing the operation, only admin user type have the permission to perform such operations. + ","{ +""token"" :"""" +}","{ + ""status"" : 400, + ""message"" : ""Token required"" +}","@pytest.mark.skip(""Skipping this because it returns status code :- 500 "") +def test_server_backup_manifest_empty_token(run_api): """""" - Editing the details of VM + testing backup-manifest api using empty string token """""" - if run_api.arch_type == ""aarch64"": - params, r = run_api.library_add_new_vm(arch=""aarch64"", type=""virtio"", port=""vda"") - else: - params, r = run_api.library_add_new_vm() + params = {""token"": """"} + r = run_api.server_backup_manifest(params) rjson = r.json() - lib_id = r.json()[""UUID""] - if run_api.arch_type == ""aarch64"": - dist_add_param = {""type"": ""virtio"", ""port"": ""vdz""} + if run_api.user_type == 'admin': + test_assert.status(r, 400) + assert rjson['error'] == 'Token Required', ""|> Json %s"" % rjson else: - dist_add_param = {} - r = run_api.library_edit(lib_id, params={""hw"": {""disks"": template_library_edit_disk_add(**dist_add_param)}}) - test_assert.status(params, rjson, ""library_edit"") - test_assert.status(r, 201) - - if 'error' not in rjson.keys(): - UUID = rjson[""UUID""] - run_api.library_delete(UUID, params) - - # Adding non_admin check of Editing a Library Image created by different user - if run_api.user_type == USER_TYPE[""non_admin""]: - lib_id 
= custom_lib_admin_operations - r = run_api.library_edit(lib_id, {""hw"": {}}) test_assert.status(r, 403) - - # Adding a Manager check of Editing a deployment info created by a user of his/her group - # and also when it's not the case - if run_api.user_type == USER_TYPE[""manager""]: - # When the user is not part of the group that the manager manages - lib_id = custom_lib_admin_operations - r = run_api.library_edit(lib_id, {""hw"": {}}) - test_assert.status(r, manager_rights_response(endpoint, manages_user=False)) - - # When the user is part of the group that the manager manages - lib_id = custom_lib_non_admin_operations - r = run_api.library_edit(lib_id, {""hw"": {}}) - test_assert.status(r, manager_rights_response(endpoint, manages_user=True)) + assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson " -/library/rest/edit/{UUID}/,updation of arch param of library,,"{ +/server/rest/set_commitable_ram/,"setting server id to commitable_ram_percent which is greater than 100 for a server. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ + 'commitable_ram_percent': 150 +}","{ ""status"" : 400, -""message"" : ""Architecture of a Machine cannot be modified."" -}","def test_library_edit_arch(library_add_new_vm, run_api): +""message"" : ""commitable_ram_percent should be less than or equal to 100"" +} +"," +def test_server_set_commmitable_ram_commitable_ram_percent_is_greater_than_100(run_api): """""" - Edit the architecture of vm + server set commmitable ram is greater than 100 """""" - p, r = library_add_new_vm - lib_id = r['UUID'] - params = {'hw': {'arch': 'aarch64'}} - res = run_api.library_edit(lib_id, params) - test_assert.status(res, 400) - rjson = res.json() - assert rjson['error'] == ""Architecture of a Machine cannot be modified."", ""|> The error is %s"" % rjson + params = { + 'commitable_ram_percent': 150 + } + r = run_api.server_set_commmitable_ram('invalid', params) + if run_api.user_type != 'admin': + test_assert.status(r, 403) + rjson = r.json() + rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson + else: + test_assert.status(r, 400) + rjson = r.json() + rjson['error'] == ""commitable_ram_percent should be less than or equal to 100"", ""|> json %s"" % rjson " -/library/rest/edit/{UUID}/,updation of cdrom in a library,"cdrom = [ - { - ""type"": ""ide"", - ""is_boot"": False - } - ] - - -updated_cdrom = [ - { - ""type"": ""sata"", - ""is_boot"": False - } - ] +/server/rest/set_commitable_ram/,"setting negative value to commitable_ram _percent for server. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
","{ -""status"" : 201 -}","def test_library_edit_cdrom(run_api): + 'commitable_ram_percent': -1 +}","{ +""status"" : 400, +""message"" : ""commitable_ram_percent should be greater than 0"" +} +","def test_server_set_commmitable_ram_commitable_ram_percent_is_negative(run_api): """""" - update cdrom with valid data + server set commmitable ram is negative """""" - cdrom = [ - { - ""type"": ""ide"", - ""is_boot"": False - } - ] - p, r = run_api.library_add_new_vm(cdrom=cdrom) - lib_id = r.json()['UUID'] - updated_cdrom = [ - { - ""type"": ""sata"", - ""is_boot"": False - } - ] - params = {'hw': {'cdrom': updated_cdrom}} - res = run_api.library_edit(lib_id, params) - test_assert.status(res, 201) - rjson = res.json() - for cdrom in rjson['hw']['cdrom']: - assert cdrom['type'] == 'sata', ""|> Json %s"" % rjson - assert cdrom['is_boot'] is False, ""|> Json %s"" % rjson - run_api.library_delete(lib_id) + params = { + 'commitable_ram_percent': -1 + } + r = run_api.server_set_commmitable_ram('invalid', params) + if run_api.user_type != 'admin': + test_assert.status(r, 403) + rjson = r.json() + rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson + else: + test_assert.status(r, 400) + rjson = r.json() + rjson['error'] == ""commitable_ram_percent should be greater than 0"", ""|> json %s"" % rjson " -/library/rest/edit/{UUID}/,updation of disk when invalid UUID provided,,"{ +/server/rest/set_commitable_ram/,"setting invalid server id to commitable_ram_percent for a server. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ + 'commitable_ram_percent': 100 +}","{ ""status"" : 404, -""message"" : ""Disk with UUID does not exist"" -}","def test_library_edit_invalid_disk_UUID(library_add_new_vm, run_api): +""message"" : ""Sever not found"" +} +","def test_server_set_commmitable_invalid_id(run_api): """""" - update disk with invalid UUID + server invalid server id """""" - p, r = library_add_new_vm - lib_id = r['UUID'] - disk_UUID = str(UUID.UUID4()) - # disk_UUID = 'invalid' it gives {'hw': {'disks': {'update': [{'UUID': ['Must be a valid UUID.']}]}}} - disks = {""update"": [ - { - ""UUID"": disk_UUID, - ""port"": ""sdz"", - ""type"": r['hw']['disks'][0]['type'] - } - ] + params = { + 'commitable_ram_percent': 100 } - params = {""hw"": {""disks"": disks}} - res = run_api.library_edit(lib_id, params) - test_assert.status(res, 404) - rjson = res.json() - assert rjson['error'] == f""Disk with UUID {disk_UUID} does not exist"", ""|> json %s"" % rjson + r = run_api.server_set_commmitable_ram('invalid', params) + if run_api.user_type != 'admin': + test_assert.status(r, 403) + rjson = r.json() + rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson + else: + test_assert.status(r, 404) + rjson = r.json() + rjson['error'] == ""Server not found"", ""|> json %s"" % rjson " -/library/rest/edit/{UUID}/,updation of disks in a library,"disks = {""update"": [ - { - ""UUID"": r['hw']['disks'][0]['UUID'], - ""port"": ""sdz"", - ""type"": r['hw']['disks'][0]['type'] - } - ] +/server/rest/set_commitable_ram/,"setting invalid server id to commitable_ram_percent for a server. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+",,"{ +""status"" : 400, +""message"" : ""commitable_ram_percent is required"" +} +","def test_server_set_commmitable_without_params(run_api): + """""" + server with set commmitable ram + """""" + r = run_api.server_set_commmitable_ram('invalid', {}) + if run_api.user_type != 'admin': + test_assert.status(r, 403) + rjson = r.json() + rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson + else: + test_assert.status(r, 400) + rjson = r.json() + rjson['error'] == ""commitable_ram_percent is required"", ""|> json %s"" % rjson +" +/server/rest/test_connection/,testing the connection to the server with invalid port,"{ + ""ip"" + ""port"": 70000, + ""username"", + ""password"", }","{ -""status"" : 201 -}","def test_library_edit_update_disk(library_add_new_vm, run_api): +""status"" : 400, +""message"" : ""Ensure this value is less than or equal to 65535"" +} +","def test_server_test_connection_invalid_port(run_api): """""" - Update disk + Testing the Connection to the Server with invalid port """""" - p, r = library_add_new_vm - lib_id = r['UUID'] - disks = {""update"": [ - { - ""UUID"": r['hw']['disks'][0]['UUID'], - ""port"": ""sdz"", - ""type"": r['hw']['disks'][0]['type'] - } - ] + params = { + ""ip"": run_api.node_ip, + ""port"": 70000, + ""username"": DEFAULT_ROOT_ACCOUNT[""user""], + ""password"": DEFAULT_ROOT_ACCOUNT[""password""] } - params = {""hw"": {""disks"": disks}} - r = run_api.library_edit(lib_id, params) - test_assert.status(r, 201) - rjson = r.json() - assert rjson['hw']['disks'][0]['port'] == 'sdz', ""|> json %s"" % rjson + r = run_api.server_test_connection(params=params) + test_assert.status(r, 400) + res = r.json() + assert 'FAILURE' in res[""result""], res + assert ""Ensure this value is less than or equal to 65535"" in res[""error""], res " -/library/rest/edit/{UUID}/,updation of disks in a library using the size param,"disks = {""update"": [ - { - ""UUID"": r['hw']['disks'][0]['UUID'], - ""port"": 
""sdz"", - ""type"": r['hw']['disks'][0]['type'], - ""size"": 5 - } - ] - } -","{ -""status"" : 400, -""message"" : """"Modifying the disk size during Library Edit is not permitted"", -}"," -def test_library_edit_disk_size_param(library_add_new_vm, run_api): +/server/rest/test_connection/,testing the connection to the server with incorrect port number,"{ + ""ip"" + ""port"": 424, + ""username"", + ""password"", + }","{ +""status"" : 200, +""message"" : ""Unable to connect to port""} +","def test_server_test_connection_incorrect_port(run_api): """""" - Update disk with 'size' param + Testing the Connection to the Server with incorrect port """""" - p, r = library_add_new_vm - lib_id = r['UUID'] - disks = {""update"": [ - { - ""UUID"": r['hw']['disks'][0]['UUID'], - ""port"": ""sdz"", - ""type"": r['hw']['disks'][0]['type'], - ""size"": 5 - } - ] + params = { + ""ip"": run_api.node_ip, + ""port"": 424, + ""username"": DEFAULT_ROOT_ACCOUNT[""user""], + ""password"": DEFAULT_ROOT_ACCOUNT[""password""] } - params = {""hw"": {""disks"": disks}} - r = run_api.library_edit(lib_id, params) - test_assert.status(r, 400) - rjson = r.json() - rjson['error'] == ""Modifying the disk size during Library Edit is not permitted"", ""|> json %s"" % rjson + r = run_api.server_test_connection(params=params) + test_assert.status(r, 200) + result = r.json() + assert result[""ssh""][""success""] is False, result + assert ""Unable to connect to port"" in result[""ssh""][""error""], result " -/library/rest/edit/{UUID}/,updation of library when requested with invalid token,"{ - lib_id = ""doesnotexits"" -}","{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_library_edit_with_invalid_token(invalid_exec_api): +/server/rest/test_connection/,testing the connection to the server,"{ + ""ip"" + ""port"": 22, + ""username"", + ""password"", + }","{ +""status"" : 200, +""response"" :success +}","def test_server_test_connection(run_api): """""" - with invalid token + Testing the 
Connection to the Server """""" - lib_id = ""doesnotexits"" - r = invalid_exec_api.library_edit(lib_id, {""hw"": {}}) - test_assert.status(r, 401) - rjson = r.json() - assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) + params = { + ""ip"": run_api.node_ip, + ""port"": 22, + ""username"": DEFAULT_ROOT_ACCOUNT[""user""], + ""password"": DEFAULT_ROOT_ACCOUNT[""password""] + } + r = run_api.server_test_connection(params=params) + test_assert.status(r, 200) + result = r.json() + assert result[""ssh""][""success""] == 1, result " -/library/rest/edit/{UUID}/,updation of library when UUID exists and it has next revision,,"{ -""status"" : 403, -""message"" : ""Next Revision Exists , Edit Permission Not Allowed"" -}","def test_library_edit_with_revision_exists(library_add_new_vm, run_api): +/server/rest/test_connection/,testing th using invalid credentials,"{ + ""ip"" + ""port"": 22, + ""username"": ""invalid"", + ""password"":""invalid"", + }","{ +""status"" : 200, +""message"" : ""Authentication failed"" +}","def test_server_test_connection_invalid_credentials(run_api): """""" - When machine with UUID Does Not Exist + Testing the Connection to the Server with invalid credentials """""" - parmas, r = library_add_new_vm - lib_id = r['UUID'] - res = run_api.deploy_image(lib_id=lib_id) - deploy_id = res.json()['UUID'] - revision = run_api.deploy_snapshot(deploy_id=deploy_id) - edit_r = run_api.library_edit(lib_id, {""hw"": {}}) - edit_rjson = edit_r.json() - test_assert.status(edit_r, 403) - assert edit_rjson['result'] == ""Next_revision Exists: Edit permission not allowed"", "">| The error message is %s"" % (edit_rjson['result']) - run_api.deploy_image_delete(deploy_id, {}) - revision_id = revision.json()['snapshotted_machine_UUID'] - run_api.library_delete(revision_id) -" -/library/rest/edit/{UUID}/,updation of library with network type changed to host and segment is set to Default Public Segment,"networks = [{ - ""type"": 
""host"", - ""model"": ""virtio"", - ""segment"": ""HostOnly Segment"", + params = { + ""ip"": run_api.node_ip, + ""port"": 22, + ""username"": ""invalid"", + ""password"": ""invalid"" } - ] - -update_netork = [{ - ""type"": ""bridge"", - ""model"": ""virtio"", - ""segment"": ""HostOnly Segment"", - }] -","{ -""status"" : 400, -""message"" : ""Network type `bridge` can only be connected to `Default Public Segment`, your provided input for segment is `HostOnly Segment`."" -}","def test_library_edit_with_network_type_bridge_segment_HostOnly(run_api): + r = run_api.server_test_connection(params=params) + test_assert.status(r, 200) + result = r.json() + assert result[""ssh""][""success""] is False, result + assert result[""ssh""][""error""] == ""Authentication failed."", result +" +/servers/rest/add/,"adding new server. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ +""status"" : 201 +}","def test_server_add(run_api, server_add_new): """""" - Library update with network type host and segment Default Public Segment + Add Server """""" - networks = [{ - ""type"": ""host"", - ""model"": ""virtio"", - ""segment"": ""HostOnly Segment"", - } - ] - params, r = run_api.library_add_new_vm(networks=networks) - update_netork = [{ - ""type"": ""bridge"", - ""model"": ""virtio"", - ""segment"": ""HostOnly Segment"", - }] - params = {'hw': {'networks': update_netork}} - lib_id = r.json()[""UUID""] - res = run_api.library_edit(lib_id, params) - test_assert.status(res, 400) - rjson = res.json() - assert rjson['error'] == ""Network type `bridge` can only be connected to `Default Public Segment`, your provided input for segment is `HostOnly Segment`."", ""|> Ther error is %s"" % rjson - run_api.library_delete(lib_id, {}) - - + template, result = server_add_new + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(result, 403) + elif run_api.user_type == 
USER_TYPE[""admin""]: + test_assert.status(result, 201) " -/library/rest/edit/{UUID}/,updation of library with network type host and segment Default Public Segment,"networks = [{ - ""type"": ""bridge"", - ""model"": ""virtio"", - ""segment"": ""Default Public Segment"", - } - ] - - -update_netork = [{ - ""type"": ""host"", - ""model"": ""virtio"", - ""segment"": ""Default Public Segment"", - }] -","{ -""status"" : 400, -""message"" :""Network type `host` can only be connected to `HostOnly Segment`, your provided input for segment is `Default Public Segment`."" -}","def test_library_edit_with_network_type_host_segment_default_public(run_api): +/servers/rest/backup-token/,"creating a backup token for the server. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ + ""status"" : 201, + ""response"" : success +}"," +def test_server_backup_token(run_api): """""" - Library update with network type host and segment Default Public Segment + create a backup token for the server """""" - networks = [{ - ""type"": ""bridge"", - ""model"": ""virtio"", - ""segment"": ""Default Public Segment"", - } - ] - params, r = run_api.library_add_new_vm(networks=networks) - update_netork = [{ - ""type"": ""host"", - ""model"": ""virtio"", - ""segment"": ""Default Public Segment"", - }] - params = {'hw': {'networks': update_netork}} - lib_id = r.json()[""UUID""] - res = run_api.library_edit(lib_id, params) - test_assert.status(res, 400) - rjson = res.json() - assert rjson['error'] == ""Network type `host` can only be connected to `HostOnly Segment`, your provided input for segment is `Default Public Segment`."", ""|> Ther error is %s"" % rjson - run_api.library_delete(lib_id, {}) - + r = run_api.server_backup_token() + rjson = r.json() + if run_api.user_type == 'admin': + test_assert.status(r, 201) + assert ""token"" in rjson, rjson + else: + test_assert.status(r, 403) + assert rjson['detail'] == 'You do not have 
permission to perform this action.', ""|> Json %s"" % rjson " -/library/rest/edit/{UUID}/,updation of library without Authorization,"{ - lib_id = ""doesnotexits"" +/servers/rest/bulkops/,performing bulk operations on non-existing servers where valid operation is requested,"{ + ""server_list"": ['invalid list'], + ""op"": 'valid' }","{ -""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -}","def test_library_edit_without_authorization(anonymous_exec_api): +""status"" : 400, +""message"" : ""Server does not exists"" +}","def test_sever_bulkops_invalid_server_id(skip_if_not_admin, run_api): """""" - without authorization + invalid server id """""" - lib_id = ""doesnotexits"" - r = anonymous_exec_api.library_edit(lib_id, {""hw"": {}}) - test_assert.status(r, 401) - rjson = r.json() - assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) + SERVER_BULK_OPS = ['syncrepo', 'delete'] + for ops in SERVER_BULK_OPS: + bulkops = { + ""server_list"": 'invalid', + ""op"": ops + } + r = run_api.server_bulkops(bulkops) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['failure'][0]['error'] == 'Server does not exist', ""|> Json %s"" % rjson " -/library/rest/edit/{UUID}/,updation of network in a library with invalid mac,"networks = [{ - ""type"": ""bridge"", - ""model"": ""virtio"", - ""segment"": ""Default Public Segment"", - } - ] - -update_network = [{ - ""mac"": ""invalid"" - }] - - +/servers/rest/bulkops/,"performing bulk operations on multiple existing servers where valid operation is requested. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
","{ + ""server_list"": ['valid list'], + ""op"": 'valid' +}","{ +""status"" : 202, +""response"" : success +}","@pytest.mark.parametrize(""operation"", SERVER_BULK_OPS, indirect=True) +def test_server_bulkops(run_api, server_bulkops, operation): + """""" + Bulk Operations in Server + """""" + r = server_bulkops + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 202) + +" +/servers/rest/bulkops/,performing bulk operations on multiple existing servers where invalid operation is requested,"{ + ""server_list"": ['valid list'], + ""op"": 'invalid' +}","{ ""status"" : 400, -""message"" : ""MAC address is not correct"" -}","def test_library_edit_network_invalid_mac(run_api): +""message"" : ""Unsupported operation. Available options are: [ 'syncrepo', 'delete', 'upgrade', 'lock_server', 'unlock_server', 'mark_for_maintenance', 'unmark_for_maintenance' ]"" +}","def test_server_bulkops_invalid_operation(skip_if_not_admin, run_api): """""" - update network with invalid mac + invalid bulkops operation """""" - networks = [{ - ""type"": ""bridge"", - ""model"": ""virtio"", - ""segment"": ""Default Public Segment"", + bulkops = { + ""server_list"": 'invalid', + ""op"": 'invalid' } - ] - params, r = run_api.library_add_new_vm(networks=networks) - update_netork = [{ - ""mac"": ""invalid"" - }] - params = {'hw': {'networks': update_netork}} - lib_id = r.json()[""UUID""] - res = run_api.library_edit(lib_id, params) - test_assert.status(res, 400) - rjson = res.json() - assert rjson['error'] == ""MAC address `invalid` is not correct"", ""|> Json %s"" % rjson - run_api.library_delete(lib_id, {}) -" -/library/rest/edit/{UUID}/,updation of serialport in a library,"serialports = [{ - ""source_type"": ""pty"", - ""target_type"": ""isa-serial"", - }] - - -updated_serialports = [{ - ""source_type"": ""pty"", - ""target_type"": ""pci-serial"", - }] - + r = 
run_api.server_bulkops(bulkops) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""Unsupported operation. Available options are: ['syncrepo', 'delete', 'upgrade', 'lock_server', 'unlock_server', 'mark_for_maintenance', 'unmark_for_maintenance']"", ""|> Json %s"" % rjson" +/servers/rest/bulkops/,"performing api bulk operations on server using empty list of server _list.Check the user type before performing the operation, only admin user type have the permission to perform such operations. ","{ -""status"" : 201 -}","def test_library_edit_serialport(run_api): + ""server_list"": [], + ""op"": 'mark_for_maintenance' +}","{ + ""status"" : 400, + ""message"" : ""server_list cannot be null or empty"" +}","def test_server_bulkops_empty_server_list(run_api): """""" - update serialport + testing server bulkops api using params as empty server list """""" - serialports = [{ - ""source_type"": ""pty"", - ""target_type"": ""isa-serial"", - }] - p, r = run_api.library_add_new_vm(serialports=serialports) - lib_id = r.json()['UUID'] - updated_serialports = [{ - ""source_type"": ""pty"", - ""target_type"": ""pci-serial"", - }] - params = {'hw': {'serialports': updated_serialports}} - res = run_api.library_edit(lib_id, params) - test_assert.status(res, 201) - rjson = res.json() - for serialport in rjson['hw']['serialports']: - assert serialport['source_type'] == 'pty', ""|> Json %s"" % rjson - assert serialport['target_type'] == 'pci-serial', ""|> Json %s"" % rjson - run_api.library_delete(lib_id) + bulkops = { + ""server_list"": [], + ""op"": 'mark_for_maintenance' + } + r = run_api.server_bulkops(bulkops) + rjson = r.json() + if run_api.user_type == 'non-admin': + test_assert.status(r, 403) + assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson + else: + test_assert.status(r, 400) + assert rjson[""result""] == ""FAILURE"", rjson + assert ""server_list cannot be null or empty"" in rjson[""error""], rjson 
" -​/library​/rest​/hvmtypes​/,fetching the hypervisor type when requested with invalid token,,"{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_library_hvmtypes_with_invalid_token(invalid_exec_api): +/servers/rest/delete/{{UUID}}/,requesting to delete server by searching with valid data for an existing deployment,"{ + 'search': server_name +}","{ +""status"" : 400, +""message"" : ""Cannot delete a server while deployment exists"" +}","def test_server_delete_while_deployments_exits(skip_if_not_admin, deploy_image, run_api): """""" - with invalid token + delete a server while deployments exist """""" - r = invalid_exec_api.library_hvmtypes() - test_assert.status(r, 401) + p, r = deploy_image + server_name = r.json()['server'] + params = { + 'search': server_name + } + res = run_api.server_list(params).json() + server_id = res['results'][0]['uuid'] + r = run_api.server_delete(server_id) + test_assert.status(r, 400) rjson = r.json() - assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) + assert rjson['error'] == 'Cannot delete a server while deployments exist', ""|> Json %s"" % rjson " -​/library​/rest​/hvmtypes​/,fetching the hypervisor type when requested without Authorization,,"{ -""status"" : 200, -""response"" : list of hypervisor type -}","def test_library_hvmtypes_without_authorization(anonymous_exec_api): +/servers/rest/delete/{{UUID}}/,"requesting to delete server by searching with invalid server_id.Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ +server_id = ""invalid"" +}","{ +""status"" : 404, +}","def test_server_delete_invalid_id(run_api): """""" - without authorization + invalid server id """""" - r = anonymous_exec_api.library_hvmtypes() - result = r.json() - test_assert.status(result, LIBRARY_HVM_TYPE, ""library_hvmtypes"") - test_assert.status(r, 200) + server_id = 'invalid' + r = run_api.server_delete(server_id) + if run_api.user_type == 'admin': + test_assert.status(r, 404) + rjson = r.json() + assert rjson['error'] == 'Delete: Server not found', ""|> Json %s"" % rjson + else: + test_assert.status(r, 403) + rjson = r.json() + rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson " -/library/rest/layerdetail/{UUID}/,fetching the layer details of an existing machine,,"{ -""status"" : 200, -""response"" : details of layer -}","def test_library_layerdetail(library_layerdetail): +/servers/rest/delete/{{UUID}}/,"deleting server using invalid uuid. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ +""server_id"" :'invalid' +}","{ + ""status"" : 404, + ""message"" : ""Delete : server not found"" +}","def test_server_delete_invalid_id(run_api): """""" - Getting the detail of layer + invalid server id """""" - template, r = library_layerdetail - result = r.json() - test_assert.status(result, template, ""library_layerdetail"") - test_assert.status(r, 200) + server_id = 'invalid' + r = run_api.server_delete(server_id) + if run_api.user_type == 'admin': + test_assert.status(r, 404) + rjson = r.json() + assert rjson['error'] == 'Delete: Server not found', ""|> Json %s"" % rjson + else: + test_assert.status(r, 403) + rjson = r.json() + rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson " -/library/rest/layerdetail/{UUID}/,fetching the layer details using a lib_id but with invalid token,"{ - lib_id = ""doesnotexits"" -}","{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_library_layerdetail_with_invalid_token(invalid_exec_api): +/servers/rest/delete/{{UUID}}/,"deleting a server when its status is set to ""online"". Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ +""status"": ""Online"", +""total_machine"": 0 +} +","{ + ""status"" : 400, + ""message"" : ""Cannot delete a Server which is in Online state"" +}","def test_server_delete_status_online(run_api): """""" - with invalid token + delete a server when it's status is Online """""" - lib_id = ""doesnotexits"" - r = invalid_exec_api.library_layerdetail(lib_id, params={}) - test_assert.status(r, 401) - rjson = r.json() - assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) -" -/library/rest/layerdetail/{UUID}/,fetching the layer details using a lib_id for which there is no existing machine,"{ - lib_id = ""doesnotexits"" + params = {""status"": ""Online"", ""total_machine"": 0} + _, server_list = run_api.filter_servers_matching_with_criteria(params, list(run_api.clm_my_servers.values())) + if server_list: + r = run_api.server_delete(server_list[0]) + rjson = r.json() + if run_api.user_type == 'admin': + test_assert.status(r, 400) + assert ""Cannot delete a Server which is in Online state"" in rjson[""error""], rjson + else: + test_assert.status(r, 403) + assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson +" +/servers/rest/detail/{{UUID}}/,getting details of server of non-existing id,"{ +server_id = ""invalid"" }","{ ""status"" : 404, -""response"" : Machine with UUID does not exist -}","def test_library_layerdetails_with_invalid_uid(run_api): +""message"" : ""Not found"" +}","def test_server_details_invalid_uuid(run_api): """""" - when machine with UUID does not exists. 
+ fetch server details with invalid uuid """""" - lib_id = ""doesnotexits"" - r = run_api.library_layerdetail(lib_id, params={}) + server_id = 'invalid' + r = run_api.server_details(server_id) test_assert.status(r, 404) -" -/library/rest/layerdetail/{UUID}/,fetching the layer details using a lib_id without Authorization,"{ - lib_id = ""doesnotexits"" -}","{ -""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -}","def test_library_layerdetail_without_authorization(anonymous_exec_api): - """""" - without authorization - """""" - lib_id = ""doesnotexits"" - r = anonymous_exec_api.library_layerdetail(lib_id, params={}) - test_assert.status(r, 401) rjson = r.json() - assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) - - + assert rjson['detail'] == 'Not found.', ""|> json %s"" % rjson " -/library/rest/layerlist/, requesting with invalid token to get the list of layer from the library,,"{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_library_layer_list_with_invalid_token(invalid_exec_api): +/servers/rest/detail/{{UUID}}/,getting details of server of existing id,,"{ +""status"" : 200 +}","def test_server_details(server_details): """""" - with invalid token + Getting details of Server """""" - r = invalid_exec_api.library_layer_list() - test_assert.status(r, 401) - rjson = r.json() - assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) + template, r = server_details + result = r.json() + test_assert.status(template, result, ""server_details"") + test_assert.status(r, 200) " -/library/rest/layerlist/, requesting without Authorization to get the list of layer from library ,,"{ -""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -}","def test_library_layer_list_without_authorization(anonymous_exec_api): +/servers/rest/list/,getting the list of servers using invalid 
group id,"{ + 'group_id': invalid_group_id +}","{ +""response"" : failure +}","def test_server_list_by_invalid_group_id(run_api): """""" - without authorization + fetch server list by group """""" + group_id = 0 + params = { + 'group_id': group_id + } + res = run_api.server_list(params).json() + assert res['count'] == 0, ""|> Json %s"" % res - r = anonymous_exec_api.library_layer_list() - test_assert.status(r, 401) - rjson = r.json() - assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) " -/library/rest/layerlist/,requesting to get the list of layer from library ,,"{ -""status"" : 200, -""response"" : list of layer -}","def test_library_layer_list(run_api): +/servers/rest/list/,getting the list of servers using group name,"{ + 'group_name': group_name +}","{ +""response"" : server list +}","def test_server_list_by_group_name(run_api): """""" - Getting the list of layer + fetch server list by group name """""" - r = run_api.library_layer_list() - test_assert.status(r, 200) + group_name = rand_string(10) + params = { + 'group_name': group_name + } + res = run_api.server_list(params).json() + assert res['count'] == 0, ""|> Json %s"" % res " -/library/rest/list/,filtering the list of library details based on created and updated DateTime Filter,,,"def test_library_filter_timefilter(run_api: apiops, library_add_new_vm): +/servers/rest/list/,getting the list of servers using group id,"{ + 'group_id': group_id +}","{ +""response"" : server list +}","def test_server_list_by_group_id(skip_if_not_admin, group_add, run_api): """""" - Filter on created and update DateTime Filter + fetch server list by group id """""" - template, rjson = library_add_new_vm - lib_id = rjson[""UUID""] - # utime and ctime is returned in the form '2023-09-14T17:59:39.173594Z' which needs conversion to '2023-09-14 17:59:39.173594' - str_ctime = rjson['ctime'].replace('T', ' ').replace('Z', '') - datetime_ctime = 
convert_datetime_stringform(rjson['ctime']) - - def handle_trigger_delay_filtering_for_created_on(created_start_date, created_end_date, created_date_range, utc=True): - """""" - Function to handle corner case if library image was created a day before and test get triggered on new day - """""" - if not utc: - created_start_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' - created_end_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": created_start_date, ""created_end_date"": created_end_date, - ""created_date_range"": created_date_range, ""page_size"": 1}).json()['count'] == 1 - # Filter on UTC time - # .... When the datetime is selected to be the same as in detail - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, - ""created_end_date"": str_ctime, ""page_size"": 1}).json()['count'] == 1 - # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased - # ........ When the datetime is selected a 1 microsecond more than from the detail - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), - ""page_size"": 1}).json()['count'] == 0 - # ........ When the datetime is selected a 1 microsecond less than from the detail - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), - ""page_size"": 1}).json()['count'] == 1 - # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased - # ........ 
When the datetime is selected a 1 microsecond more than from the detail - assert run_api.library_list({""UUID"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), - ""page_size"": 1}).json()['count'] == 1 - # ........ When the datetime is selected a 1 microsecond less than from the detail - assert run_api.library_list({""UUID"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), - ""page_size"": 1}).json()['count'] == 0 - # ........Filter on 'created_start_date' and 'created_end_date' when passed blank string - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": """", ""created_end_date"": """", ""page_size"": 1}).json()['count'] == 1 - # ........Filter on 'created_start_date' and 'created_end_date' when created_start_date is greater than created_end_date - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), - ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), ""page_size"": 1}).json()['count'] == 0 - # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. 
- # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'today - try: - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine is created yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'yesterday - try: - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 - except AssertionError: - # when machine is created yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'week - try: - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine is created on week's last day at 23:59:59.9999999 and test get triggered on new week at 00:00:00.0000000 - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'month - try: - assert 
run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine is created on month's last day at 23:59:59.9999999 and test get triggered on new month at 00:00:00.0000000 - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'year' - try: - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine is created on year last day at 23:59:59.9999999 and test get triggered on new year at 00:00:00.0000000 - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") - # .........When the created_date_range format is invalid - response = run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""invalid"", ""page_size"": 1}) - test_assert.status(response, 400) - assert response.json()['created_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() - - # .........When the created_start_date and created_end_date has white spaces in them - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": "" "" + str_ctime + "" "", ""created_end_date"": "" "" + str_ctime + "" "", ""page_size"": 1}).json()['count'] == 1 - - # Filter on list time - # .... When the datetime is selected to be the same as in detail but having TimeZone of +0530 - # ........ 
When the datetime is selected to be the same as in detail but with tzone list, i.e., timedelta being 19800 equivalent to +05:30 - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""page_size"": 1}).json()['count'] == 1 - # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 - # ........ When the datetime is selected to be the same as in detail but with tzone list, i.e., timedelta being 19800 equivalent to +05:30 - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', - ""page_size"": 1}).json()['count'] == 0 - # ........ When the datetime is selected to be the same as in detail but with tzone list, i.e., timedelta being 19800 equivalent to +05:30 but increase microsecond by 1 to fail filter - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', - ""page_size"": 1}).json()['count'] == 1 - # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 - # ........ When the datetime is selected a 1 microsecond more than from the detail - assert run_api.library_list({""UUID"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', - ""page_size"": 1}).json()['count'] == 1 - # ........ 
When the datetime is selected a 1 microsecond less than from the detail - assert run_api.library_list({""UUID"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', - ""page_size"": 1}).json()['count'] == 0 - # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'today - try: - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine is created yesterday at 23:59:59.9999999 list and test get triggered at 00:00:00.0000000 list - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'yesterday - try: - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 - except AssertionError: - # when machine is created yesterday at 23:59:59.9999999 list and test get triggered at 00:00:00.0000000 list - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) - # ........When the 'created_start_date' and 'created_end_date' are same as in the 
detail and 'created_date_range' is passed as'week - try: - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine is created on week's last day at 23:59:59.9999999 list and test get triggered on new week at 00:00:00.0000000 list - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'month - try: - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine is created on month's last day at 23:59:59.9999999 list and test get triggered on new month at 00:00:00.0000000 list - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'year' - try: - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # 
when machine is created on year last day at 23:59:59.9999999 list and test get triggered on new year at 00:00:00.0000000 list - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) - # .........When the created_date_range format is invalid - response = run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""invalid"", ""page_size"": 1}) - test_assert.status(response, 400) - assert response.json()['created_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() - # .........When the created_start_date and created_end_date has white spaces in them - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", ""created_end_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", - ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 - - + params, r = group_add + rjson = r.json() + group_id = rjson['id'] + group_name = rjson['name'] + servers_list = { + ""servers_list"": list(run_api.clm_my_servers.values()) + } + run_api.group_add_server(servers_list, group_id) + params = { + 'group_id': group_id + } + servers = run_api.server_list(params).json() + for server in servers['results']: + server_details = run_api.server_details(server['uuid']).json() + server_in_groups = [group['name'] for group in server_details['groups']] + assert group_name in server_in_groups, ""|> Json %s"" % server_details +" +/servers/rest/list/,getting the list of servers excluding some servers using server UUID,"{ + 'uuid': server_uuid 
+}","{ +""response"" : server list +}","def test_server_list_by_uuid(run_api): + """""" + fetch server list based on server uuid + """""" + server_list = run_api.server_list().json() + server_uuid = choice([server['uuid'] for server in server_list['results']]) + params = { + 'uuid': server_uuid + } + result = run_api.server_list(params).json() + for server in result['results']: + assert server['uuid'] == server_uuid, ""|> json %s"" % server " -/library/rest/list/,getting list of vm present in library by filtering it based on created and update DateTime,,"{ -""status"" : 400 -}","def test_library_filter_timefilter(run_api: apiops, library_add_new_vm): +/servers/rest/list/,getting the list of servers excluding some servers using search parameter,"{ + 'search': hostname +}","{ +""response"" : server list +}","def test_server_list_by_search(run_api): """""" - Filter on created and update DateTime Filter + fetch server list based on search params """""" - template, rjson = library_add_new_vm - lib_id = rjson[""UUID""] - # utime and ctime is returned in the form '2023-09-14T17:59:39.173594Z' which needs conversion to '2023-09-14 17:59:39.173594' - str_ctime = rjson['ctime'].replace('T', ' ').replace('Z', '') - datetime_ctime = convert_datetime_stringform(rjson['ctime']) - - def handle_trigger_delay_filtering_for_created_on(created_start_date, created_end_date, created_date_range, utc=True): - """""" - Function to handle corner case if library image was created a day before and test get triggered on new day - """""" - if not utc: - created_start_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' - created_end_date = convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": created_start_date, ""created_end_date"": created_end_date, - ""created_date_range"": created_date_range, ""page_size"": 1}).json()['count'] == 1 - # Filter on UTC time 
- # .... When the datetime is selected to be the same as in detail - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, - ""created_end_date"": str_ctime, ""page_size"": 1}).json()['count'] == 1 - # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased - # ........ When the datetime is selected a 1 microsecond more than from the detail - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), - ""page_size"": 1}).json()['count'] == 0 - # ........ When the datetime is selected a 1 microsecond less than from the detail - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), - ""page_size"": 1}).json()['count'] == 1 - # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased - # ........ When the datetime is selected a 1 microsecond more than from the detail - assert run_api.library_list({""UUID"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), - ""page_size"": 1}).json()['count'] == 1 - # ........ 
When the datetime is selected a 1 microsecond less than from the detail - assert run_api.library_list({""UUID"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), - ""page_size"": 1}).json()['count'] == 0 - # ........Filter on 'created_start_date' and 'created_end_date' when passed blank string - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": """", ""created_end_date"": """", ""page_size"": 1}).json()['count'] == 1 - # ........Filter on 'created_start_date' and 'created_end_date' when created_start_date is greater than created_end_date - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=1)), - ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(microseconds=-1)), ""page_size"": 1}).json()['count'] == 0 - # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'today - try: - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine is created yesterday at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'yesterday - try: - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 - except AssertionError: - # when machine is created yesterday 
at 23:59:59.9999999 and test get triggered at 00:00:00.0000000 - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'week - try: - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""week"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine is created on week's last day at 23:59:59.9999999 and test get triggered on new week at 00:00:00.0000000 - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'month - try: - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine is created on month's last day at 23:59:59.9999999 and test get triggered on new month at 00:00:00.0000000 - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'year' - try: - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine is created on year last day at 23:59:59.9999999 and test get triggered on new year at 00:00:00.0000000 - 
handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"") - # .........When the created_date_range format is invalid - response = run_api.library_list({""UUID"": lib_id, ""created_start_date"": str_ctime, ""created_end_date"": str_ctime, ""created_date_range"": ""invalid"", ""page_size"": 1}) - test_assert.status(response, 400) - assert response.json()['created_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() - - # .........When the created_start_date and created_end_date has white spaces in them - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": "" "" + str_ctime + "" "", ""created_end_date"": "" "" + str_ctime + "" "", ""page_size"": 1}).json()['count'] == 1 - - # Filter on list time - # .... When the datetime is selected to be the same as in detail but having TimeZone of +0530 - # ........ When the datetime is selected to be the same as in detail but with tzone list, i.e., timedelta being 19800 equivalent to +05:30 - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""page_size"": 1}).json()['count'] == 1 - # ......> Filter on 'created_start_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 - # ........ When the datetime is selected to be the same as in detail but with tzone list, i.e., timedelta being 19800 equivalent to +05:30 - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', - ""page_size"": 1}).json()['count'] == 0 - # ........ 
When the datetime is selected to be the same as in detail but with tzone list, i.e., timedelta being 19800 equivalent to +05:30 but increase microsecond by 1 to fail filter - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', - ""page_size"": 1}).json()['count'] == 1 - # ......> Filter on 'created_end_date' alone, when start_date has 1 microsecond increased and 1 microsecond decreased when on TimeZone of +0530 - # ........ When the datetime is selected a 1 microsecond more than from the detail - assert run_api.library_list({""UUID"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=1)) + '+05:30', - ""page_size"": 1}).json()['count'] == 1 - # ........ When the datetime is selected a 1 microsecond less than from the detail - assert run_api.library_list({""UUID"": lib_id, ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800, microseconds=-1)) + '+05:30', - ""page_size"": 1}).json()['count'] == 0 - # ........Filter on 'created_start_date', 'created_end_date' and 'created_date_range'. 
- # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'today - try: - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""today"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine is created yesterday at 23:59:59.9999999 list and test get triggered at 00:00:00.0000000 list - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'yesterday - try: - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""yesterday"", ""page_size"": 1}).json()['count'] == 0 - except AssertionError: - # when machine is created yesterday at 23:59:59.9999999 list and test get triggered at 00:00:00.0000000 list - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'week - try: - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""week"", ""page_size"": 
1}).json()['count'] == 1 - except AssertionError: - # when machine is created on week's last day at 23:59:59.9999999 list and test get triggered on new week at 00:00:00.0000000 list - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'month - try: - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine is created on month's last day at 23:59:59.9999999 list and test get triggered on new month at 00:00:00.0000000 list - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) - # ........When the 'created_start_date' and 'created_end_date' are same as in the detail and 'created_date_range' is passed as'year' - try: - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""year"", ""page_size"": 1}).json()['count'] == 1 - except AssertionError: - # when machine is created on year last day at 23:59:59.9999999 list and test get triggered on new year at 00:00:00.0000000 list - handle_trigger_delay_filtering_for_created_on(created_start_date=str_ctime, created_end_date=str_ctime, created_date_range=""yesterday"", utc=False) - # .........When the created_date_range format is invalid - response = 
run_api.library_list({""UUID"": lib_id, ""created_start_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', ""created_end_date"": convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30', - ""created_date_range"": ""invalid"", ""page_size"": 1}) - test_assert.status(response, 400) - assert response.json()['created_date_range'][0] == 'Select a valid choice. invalid is not one of the available choices.', ""The json is %s"" % response.json() - # .........When the created_start_date and created_end_date has white spaces in them - assert run_api.library_list({""UUID"": lib_id, ""created_start_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", ""created_end_date"": "" "" + convert_datetime_stringform(datetime_ctime + timedelta(seconds=19800)) + '+05:30' + "" "", - ""created_date_range"": ""month"", ""page_size"": 1}).json()['count'] == 1 + server_list = run_api.server_list().json() + hostname = choice([server['hostname'] for server in server_list['results']]) + params = { + 'search': hostname + } + result = run_api.server_list(params).json() + for server in result['results']: + assert server['hostname'] == hostname, ""|> json %s"" % server " -/library/rest/list/,getting the list of VM present in the library,,"{ -""status"" : 200, -""response"" : ""list of VM -}","def test_library_list(library_list): +/servers/rest/list/,getting the list of servers excluding some servers using group_name,"{ +exclude_group_name = [server_list_to_exclude] +}","{ +""response"" : server list +}","def test_server_list_by_excluding_group_name(run_api): """""" - Getting the list of VM present in the library + fetch server list by excluding_group name """""" - template, r = library_list - test_assert.status(r, template, ""library_list"", ""name"") - test_assert.status(r, 200) + group_name = rand_string(10) + params = { + 'exclude_group_name': group_name + } + res = 
run_api.server_list(params).json() + assert res['count'] == 0, ""|> Json %s"" % res " -/library/rest/list/,requesting the list of VM present in the library with invalid token,,"{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_library_list_with_invalid_token(invalid_exec_api): +/servers/rest/list/,getting the list of servers excluding some servers using group_id,"{ +exclude_group_id = [server_list_to_exclude] +}","{ +""response"" : server list +}","def test_server_list_by_excluding_group(skip_if_not_admin, run_api, group_add): """""" - with invalid token + fetch server list by excluding group """""" - r = invalid_exec_api.library_list(params={}) - test_assert.status(r, 401) + p, r = group_add rjson = r.json() - assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) + params = { + 'exclude_group_id': rjson['id'] + } + # New added group does not have any server so it will return all the server list + servers1 = run_api.server_list(params).json() + # Fetching the server list + servers2 = run_api.server_list().json() + assert servers1['count'] == servers2['count'] " -/library/rest/list/,requesting the list of VM present in the library without Authorization,,"{ -""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -}","def test_library_list_without_authorization(anonymous_exec_api): +/servers/rest/list/,getting the list of servers,,"{ +""status"" : 200, +""response"" : server list +}","def test_server_list(server_list): """""" - without authorization + Getting the list Server """""" - r = anonymous_exec_api.library_list(params={}) - test_assert.status(r, 401) - rjson = r.json() - assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) + r = server_list + test_assert.status(r, SERVER_LIST, ""server_list"", ""hostname"") + test_assert.status(r, 200) +" +/servers/rest/list/,fetching the server list by 
setting the scope parameter,"{ +""scope"" : ""my"" +}","{ +""status"" :200 +}","def test_server_list_by_scope(run_api): + """""" + fetch server list using scope :- 'my' + """""" + params = { + 'scope': ""my"" + } + res = run_api.server_list(params) + test_assert.status(res, 200)" +/servers/rest/list/,fetching the server list by setting the replication status,"{ +""status"" :""installing"" +} +",,"def test_server_list_by_installation_status(run_api): + """""" + fetch server list by replication status + """""" + params = { + 'status': ""Installing"" + } + res = run_api.server_list(params).json() + assert res['count'] == 0, ""|> Json %s"" % res " -/library/rest/nmodeltypes/,requesting the types of NetworkModel without Authorization,,"{ -""status"" : 200, -""response"" : Types of Network Model -}","def test_library_nmodeltypes_without_authorization(anonymous_exec_api): +/servers/rest/syncrepo/,"syncing the layers on server. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+",,"{ +""status"" :201, +""reponse"" : success +}","@pytest.mark.skip(reason=""having issue in this testcase"") +def test_server_syncrepo(run_api, server_syncrepo): """""" - without authorization + Sync the layers on server """""" - r = anonymous_exec_api.library_nmodeltypes() - test_assert.status(r, 200) - result = r.json() - test_assert.status(result, LIBRARY_NETWORK_MODEL_TYPE, ""library_nmodeltypes"") + r = server_syncrepo + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 201) " -/library/rest/nmodeltypes/,requesting the types of NetworkModel using with invalid token,,"{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_library_nmodeltypes_with_invalid_token(invalid_exec_api): +/servers/rest/syncrepo/{{UUID}},"syncing layers on server using existing UUID and server is running.Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ +""status"" : 403 / 201 +}","def test_server_syncrepo(run_api, server_syncrepo): """""" - with invalid token + Sync the layers on server """""" - r = invalid_exec_api.library_nmodeltypes() - test_assert.status(r, 401) - rjson = r.json() - assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) + r = server_syncrepo + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 201) " -/library/rest/nmodeltypes/,updation of cd rom in a library,,"{ -""status"" : 200, -""response"" : Types of Network Model -}","def test_library_nmodeltypes(run_api): +/servers/rest/upgradeserver/{UUID}/,"updating server using valid existing data.Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+",,"{ +""status"" : 201, +""response"" : server updated +}","def test_server_upgradeserver(run_api, server_upgradeserver): """""" - Getting the type of Network Model + Updating sever """""" - r = run_api.library_nmodeltypes() - result = r.json() - test_assert.status(result, LIBRARY_NETWORK_MODEL_TYPE, ""library_nmodeltypes"") - test_assert.status(r, 200) + r = server_upgradeserver + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 201) " -/library/rest/ntypes/,requesting the network type list,,200: Network Type List,"def test_library_ntypes(run_api): +/servers/rest/upgradeserver/{UUID}/,"updating server using invalid data.Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ +server_id = ""invalid"" +}","{ +""status"" : 404, +""message"" : ""Server not found"" +}","def test_server_upgrade_invalid_uuid(run_api): """""" - Getting the list of Network type + server upgrade with invalid server id """""" - r = run_api.library_ntypes() - result = r.json() - test_assert.status(result, LIBRARY_NETWORK_TYPE, ""library_ntypes"") - test_assert.status(r, 200) + server_id = 'invalid' + r = run_api.server_upgrade(server_id) + if run_api.user_type == 'admin': + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == 'Server Upgrade API isn\'t implemented. 
Use ServerBulkOps with ""upgrade"" as operation to upgrade Managed Hosts', ""|> Json %s"" % rjson + else: + test_assert.status(r, 403) + rjson = r.json() + assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson " -/library/rest/ntypes/,fetching the list of ntypes of library when requested with invalid token,,"{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_library_ntypes_with_invalid_token(invalid_exec_api): +/shares/rest/add/{{UUID}}/,adding new object to vm ,,"{ +""status"" : 201, +""response"" : success +}","def test_shares_add(shares_add): """""" - with invalid token + Adding new object to the vm """""" - r = invalid_exec_api.library_ntypes() - test_assert.status(r, 401) - rjson = r.json() - assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) + template, r = shares_add + test_assert.status(r, 201) " -/library/rest/revisions/,requesting the revision list of library,,"{ -""status"" : 200, -""response"" : Revision list of library -}","def test_library_revisions(library_revisions): +/shares/rest/list/,fetching the shares list of machine,,"{ +""status"" : 201, +""response"" : success +}","def test_shares_list(shares_list): """""" - revision list of library + Fetch list of shares of machine """""" - params, r = library_revisions + r = shares_list test_assert.status(r, 200) " -/library/rest/revisions/,requesting the revision list of library when invalid token provided,"{ -machine_UUID : 'doesnotexits' +/tags/rest/add/{UUID}/,adding tag without authorization,"{ +vm_uuid = ""valid"" }","{ ""status"" : 401, -""message"" : ""Invalid token"" -}","def test_library_revisions_with_invalid_token(invalid_exec_api): +""message"" : ""Authentication credentials were not provided."" +}","def test_tags_add_without_authorization(anonymous_exec_api): """""" - with invalid token + without authorization """""" - r = invalid_exec_api.library_revisions('doesnotexits') + 
vm_uuid = ""invalid"" + p, r = anonymous_exec_api.tag_add(vm_uuid,) test_assert.status(r, 401) rjson = r.json() - assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) -" -/library/rest/revisions/,requesting the revision list of library when machine with the provided UUID does not exist,"{ -machine_UUID : 'doesnotexits' -}","{ -""status"" : 404, -""message"" : ""Machine with given UUID does not exist"" -}","def test_library_revisions_with_invaild_UUID(run_api): - """""" - library revision machine does not exist - """""" - r = run_api.library_revisions('doesnotexits') - test_assert.status(r, 404) - rjson = r.json() - assert rjson['detail'] == ""Machine with given UUID does not exist"", ""|> The error message is %s"" % rjson + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) " -/library/rest/revisions/,requesting the revision list of library without Authorization,"{ -machine_UUID : 'doesnotexits' +/tags/rest/add/{UUID}/,adding tag using invalid token,"{ +vm_uuid = ""invalid"" }","{ ""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -}","def test_library_revisions_without_authorization(anonymous_exec_api): +""message"" : ""Invalid token"" +}","def test_tags_add_invalid_token(invalid_exec_api): """""" - without authorization + invalid token """""" - r = anonymous_exec_api.library_revisions('doesnotexits') + + vm_uuid = ""invalid"" + p, r = invalid_exec_api.tag_add(vm_uuid) test_assert.status(r, 401) rjson = r.json() - assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) -" -/library/rest/segment_list/,fetching of segment list from library,,"{ -""status"" :200, -""response"" :success -}","def test_library_segmentlist(library_segmentlist): - """""" - fetch segment list - """""" - params, r = library_segmentlist - test_assert.status(r, 
200) + assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) " -/library/rest/segment_list/,fetch segment list for library for brigde type of NIC,"{ -'nic_type': 'bridge' -}",,"def test_library_segment_with_nic_type(library_add_new_vm, run_api): +/tags/rest/add/{UUID}/,adding tag for valid existing UUID without name attribute in body,"{ +""tag_list"" = + [ + { + ""value"": ""494"", + ""description"": ""test"" + } + ] +}","{ +""status"" : 400, +""response"" : ""This field is required"" +}","def test_tags_add_without_name(run_api, library_add_new_vm): """""" - Fetch library segment with nic type + add tag without name """""" - p, res = library_add_new_vm - params = {'nic_type': 'bridge'} - r1 = run_api.library_segmentlist(params).json() - for segment in r1['results']: - assert segment['network_type'] == 'public' - params = {'nic_type': 'host'} - r2 = run_api.library_segmentlist(params).json() - for segment in r2['results']: - assert segment['network_type'] == 'hostOnly' + params, r = library_add_new_vm + vm_uuid = r['uuid'] + params = {""tag_list"": [{""value"": ""494"", ""description"": ""test""}]} + tag_params, result = run_api.tag_add(vm_uuid, params) + test_assert.status(result, 400) + rjson = result.json() + msg = rjson['tag_list'][0]['name'][0] + assert msg == ""This field is required."", ""The error message is %s"" % (msg) " -/library/rest/segment_list/,"fetch segment list for library by setting search parameter to ""host""","{ -search : ""host"" -}",,"def test_library_segmentlist_with_search_param(library_add_new_vm, run_api): - """""" - fetch segmentlist with search params - """""" - p, r = library_add_new_vm - params = {'search': 'host'} - r = run_api.library_segmentlist(params).json() - for segment in r['results']: - segment['network_type'] == 'hostOnly' -" -/library/rest/segment_list/,"fetch segment list for library by setting the ""network_type"" parameter","{ -""network_type"" :hostOnly -}",,"def 
test_library_segmentlist_with_network_type(library_add_new_vm, run_api): +/tags/rest/add/{UUID}/,adding tag for valid existing UUID with empty name attribute in the body,"{ +""tag_list"" = + [ + { + ""name"" : """" + ""value"": ""494"", + ""description"": ""test"" + } + ] +}","{ +""status"" : 400, +""response"" : ""This field may not be blank"" +}","def test_tags_add_with_empty_name(run_api, library_add_new_vm): """""" - fetch segmentlist with network type + add tag with empty name """""" - p, r = library_add_new_vm - params = {'network_type': 'hostOnly'} - r = run_api.library_segmentlist(params).json() - for segment in r['results']: - segment['network_type'] == 'hostOnly' + params, r = library_add_new_vm + vm_uuid = r['uuid'] + + params = {""tag_list"": [{""name"": """", ""value"": ""494"", ""description"": ""test""}]} + tag_params, result = run_api.tag_add(vm_uuid, params) + test_assert.status(result, 400) + rjson = result.json() + msg = rjson['tag_list'][0]['name'][0] + assert msg == ""This field may not be blank."", ""The error message is %s"" % (msg) " -/library/rest/upload_disk/{UUID}/,uploading disk when disk_UUID provided is invalid,,"{ -""status"" : 404, -""message"" : ""Upload Disk: Disk not found"" -}","def test_library_upload_disk_invalid_disk_UUID(library_add_new_vm, run_api): +/tags/rest/add/{UUID}/,adding tag for valid existing UUID of machine,,"{ +""status"" : 201, +""response"" : added tag +}","def test_tags_add(run_api, library_add_new_vm): """""" - Invalid disk UUID + add tag with valid data """""" - p, r = library_add_new_vm - lib_id = r['UUID'] - disk_UUID = ""invalid"" - r = run_api.library_upload_disk(lib_id, disk_UUID) - test_assert.status(r, 404) - rjson = r.json() - assert rjson['error'] == 'Upload Disk: Disk not found', ""json %s"" % rjson + params, r = library_add_new_vm + vm_uuid = r['uuid'] + + # Add Tag + tag_params, result = run_api.tag_add(vm_uuid) + test_assert.status(result, 201) + res = run_api.tag_list(tag_params, 
filter_search={""object_uuid"": vm_uuid}) + results = res.json()[""results""] + tag = results[-1] + r = run_api.tag_delete(id=tag[""id""], params={}) " -/library/rest/upload_disk/{UUID}/,uploading disk when machine UUID provided is invalid,"{ -lib_id = ""invalid"", -disk_UUID = ""invalid"" -} -","{ +/tags/rest/add/{UUID}/,adding tag for invalid UUID of machine,"{ +vm_uuid = ""invalid"" +}","{ ""status"" : 404, -""message"" : ""Upload Disk: Machine not found"" -}","def test_library_upload_disk_invalid_machine_UUID(run_api): +""response"" : ""No object with given uuid"" +}","def test_tags_add_with_invalid_uuid(run_api): """""" - Invalid machine UUID + add tag with invalid uuid """""" - lib_id = ""invalid"" - disk_UUID = ""invalid"" - r = run_api.library_upload_disk(lib_id, disk_UUID) - test_assert.status(r, 404) - rjson = r.json() - assert rjson['error'] == 'Upload Disk: Machine not found', ""json %s"" % rjson + vm_uuid = ""invalid"" + + tag_params, result = run_api.tag_add(vm_uuid,) + test_assert.status(result, 404) + rjson = result.json() + assert rjson['error'] == ""No object with given uuid"", ""The error message is %s"" % (rjson['error']) " -/library/rest/upload_disk/{UUID}/,uploading disk when the disk size does not match,,"{ -""status"" : 400, -""message"" : ""Disk size mismatch."" -}","def test_library_upload_disk_mismatch_disk_size(library_add_new_vm, run_api): +/tags/rest/delete/{id}/,requesting to delete tag without authorization,"{ +tag_id = id +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided."" +}","def test_tags_delete_without_authorization(anonymous_exec_api): """""" - Mismatch disk size + without authorization """""" - p, r = library_add_new_vm - lib_id = r['UUID'] - disk_UUID = r['hw']['disks'][0]['UUID'] - r = run_api.library_upload_disk(lib_id, disk_UUID) - test_assert.status(r, 400) + tag_id = 0 + r = anonymous_exec_api.tag_delete(id=tag_id, params={}) + test_assert.status(r, 401) rjson = r.json() - assert 
re.match(r'Disk size mismatch. Uploaded disk size: (\d+), old disk size: (\d+)', rjson['error']), ""json %s"" % rjson + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) " -/library/rest/viewmachinelist/,getting the list of machines when requested,,"{ -""status"" : 200, -""response"" : Machine Details list -}","def test_library_viewmachinelist(run_api): +/tags/rest/delete/{id}/,"requesting to delete tag with valid existing id and the tag is not in [session_id, session_created_on, session name, deployment ip, deployment mac]",,"{ +""status"" : 204, +""response"" : tag deleted +}","def test_tags_delete(library_add_new_vm, run_api): """""" - getting the list of machines + tag delete """""" - r = run_api.library_viewmachinelist() - test_assert.status(r, 200) + params, r = library_add_new_vm + vm_uuid = r['uuid'] + tag_params, result = run_api.tag_add(vm_uuid) + res = run_api.tag_list(tag_params, filter_search={""object_uuid"": vm_uuid}) + results = res.json()[""results""] + newtag = [tag for tag in results if tag['name'] not in ('_sessionid', '_session_created_on')][0] + r = run_api.tag_delete(id=newtag[""id""], params={}) + test_assert.status(r, 204) " -/library/rest/viewmachinelist/,getting the list of machines when requested using the search parameter.,params = { 'search': 'machine' },"{ -""status"" : 200, -""response"" : Machine Details list -}","def test_library_viewmachinelist_search_parameter(run_api): +/tags/rest/delete/{id}/,"requesting to delete tag with valid existing id and the tag is in [session_id, session_created_on, session name, deployment ip, deployment mac]",,"{ +""status"" : 400, +""message"" : ""Delete not allowed"" +}","def test_tags_delete_with_undeletable_tag(library_add_new_vm, run_api): """""" - provide search parameter + tag delete tags are '_sessionid', '_session_created_on' """""" - params = { - 'search': 'machine' - } - r = 
run_api.library_viewmachinelist(params) - test_assert.status(r, 200) + params, r = library_add_new_vm + vm_uuid = r['uuid'] + tag_params, result = run_api.tag_add(vm_uuid) + res = run_api.tag_list(tag_params, filter_search={""object_uuid"": vm_uuid}) + results = res.json()[""results""] + newtag = [tag for tag in results if tag['name'] in ('_sessionid', '_session_created_on')][0] + r = run_api.tag_delete(id=newtag[""id""], params={}) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['result'] == ""Delete not allowed"", ""The error message is %s"" % (rjson['result']) " -/library/rest/viewmachinelist/,getting the list of machines when requested with invalid token,,"{ +/tags/rest/delete/{id}/,requesting to delete tag using invalid token,"{ +tag_id = id +}","{ ""status"" : 401, ""message"" : ""Invalid token"" -}","def test_library_viewmachinelist_with_invalid_token(invalid_exec_api): +}"," +def test_tags_delete_invalid_token(invalid_exec_api): """""" - with invalid token + invalid token """""" - r = invalid_exec_api.library_viewmachinelist() + tag_id = 0 + r = invalid_exec_api.tag_delete(id=tag_id, params={}) test_assert.status(r, 401) rjson = r.json() assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) " -/library/rest/viewmachinelist/,getting the list of machines when requested with param - page_size and page no,"params = { - 'page_size': 1, - 'page_no': 1 - }","{ -""status"" : 200, -""response"" : Machine Details list -}","def test_library_viewmachinelist_with_page_size_and_page_no(run_api): +/tags/rest/delete/{id}/,requesting to delete tag using invalid tag_id,"{ +tag_id = id +}","{ +""status"" : 404, +""message"" : ""Tag does not exist"" +}","def test_tags_delete_with_invalid_id(run_api): """""" - provide page_size and page_no + tag delete invalid id """""" - params = { - 'page_size': 1, - 'page_no': 1 - } - r = run_api.library_viewmachinelist(params) - test_assert.status(r, 200) + tag_id = 0 + r = 
run_api.tag_delete(id=tag_id, params={}) + test_assert.status(r, 404) + rjson = r.json() + assert rjson['error'] == ""Tag does not exist"", ""The error message is %s"" % (rjson['error']) " -/library/rest/viewmachinelist/,getting the list of machines when requested with params - ram_min and ram_max,"params = { 'ram_min': 0, 'ram_max': 10000 }","{ -""status"" : 200, -""response"" : Machine Details list -}","def test_library_viewmachinelist_with_min_and_max_ram(run_api): +/tags/rest/list/,requesting to fetch list of tags without authorization,"{ +'page' = 1, +'page_size' = 1 +}","{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided."" +}","def test_tags_list_without_authorization(anonymous_exec_api): """""" - provide ram_min and ram_max params + without authorization """""" - params = { - 'ram_min': 0, - 'ram_max': 10000 - } - r = run_api.library_viewmachinelist(params) - test_assert.status(r, 200) + r = anonymous_exec_api.tag_list({}, {}) + test_assert.status(r, 401) + rjson = r.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) " -/library/rest/viewmachinelist/,getting the list of machines when requested with params- disk_size_min and disk_size_max,"params = { - 'disk_size_min': 0, - 'disk_size_max': 10000 - }","{ +/tags/rest/list/,requesting to fetch list of tags with Page and Page Size,"{ +'page' = 1, +'page_size' = 1 +}","{ ""status"" : 200, -""response"" : Machine Details list -}","def test_library_viewmachinelist_with_min_and_max_disk(run_api): +""message"" : tag list for specific page +}","def test_tags_list_with_page_and_pagesize(run_api): """""" - provide disk_size_min and disk_size_max params + when requested with page and page size """""" - params = { - 'disk_size_min': 0, - 'disk_size_max': 10000 - } - r = run_api.library_viewmachinelist(params) - test_assert.status(r, 200) -" -/library/rest/viewmachinelist/,getting the list of machines 
when requested without Authorization,,"{ + params = {'page': 1, 'page_size': 1} + r = run_api.tag_list(params, {}) + test_assert.status(r, 200)" +/tags/rest/list/,requesting to fetch list of tags with invalid token,"{ +'page' = None, +'page_size' = None +}","{ ""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -}","def test_library_viewmachinelist_without_authorization(anonymous_exec_api): +""message"" : ""Invalid token"" +}"," +def test_tags_list_invalid_token(invalid_exec_api): """""" - without authorization + invalid token """""" - r = anonymous_exec_api.library_viewmachinelist() + r = invalid_exec_api.tag_list({}, {}) test_assert.status(r, 401) rjson = r.json() - assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) + assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) " -/library/rest/viewmachinelist/,"getting the list of machines where ""scope"" param is set to public","params = { - ""scope"": 'public' - }","{ -""status"" : 200, -""response"" : Machine Details list -}","def test_library_viewmachinelist_with_public_scope(run_api): +/user/rest/add-group/{id}/,"requesting to add user to non-existing group. Check the user type before performing the operation. 
+","{ +user_id = ""valid"" , +group_names = ""invalid_group_list"" +}","{ +""status"" : 403 / 404, +""message"" : ""Group matching query does not exist."" +}","def test_user_add_group_invalid_grp_name(run_api, admin_exec_api): """""" - provide ""scope"" as public + Adding user into invalid group name """""" - params = { - ""scope"": 'public' + groups_name = { + 'groups': ['0'] + } + user_result = admin_exec_api.user_list() + res = user_result.json() + user_ids = [result['id'] for result in res['results']] + user_id = random.choice(user_ids) + template, r = run_api.user_add_group(user_id, groups_name) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + res = r.json() + assert res['error'] == ""Provided groups must be a list of Group's UUIDs."" + test_assert.status(r, 400) +" +/user/rest/add-group/{id}/,"requesting to add user to group using valid user_id where group name provided is an integer instead of string. Check the user type before performing the operation. 
+","{ +user_id = ""valid"" , +group_names = ""invalid_group_list_datatype"" +}","{ +""status"" : 403 / 400, +""response"" : ""Provided group names must be a list of strings"" +}","def test_user_add_group_invalid_data(run_api, admin_exec_api): + """""" + Provide integer instead of string in group name list + """""" + groups_name = { + 'groups': [1] } - r = run_api.library_viewmachinelist(params) - test_assert.status(r, 200) + user_result = admin_exec_api.user_list() + res = user_result.json() + user_ids = [result['id'] for result in res['results']] + user_id = random.choice(user_ids) + template, r = run_api.user_add_group(user_id, groups_name) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + res = r.json() + assert res['error'] == ""Provided groups must be a list of Group's UUIDs."", ""|> Json %s"" % res + test_assert.status(r, 400) + " -/profile/rest/get/,"fetching list of profiles. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
-",,"{ -""status"" : 200, -""response"" : list of profiles -}","def test_profile_list(run_api, profile_list): +/user/rest/add-group/{id}/,requesting to add user to group using invalid user_id.Check the user type before performing the operation.,"{ +user_id = ""invalid"" , +group_names = ""valid_group_list"" +}","{ +""status"" : 403 / 404 +}","PARAMETERS = [{""action"": GROUP_ADD}] + +@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) +def test_user_add_group_invalid_user_id(run_api, custom_group_admin_operations): """""" - Fetch list of all profiles + Adding invalid user id into group """""" - r = profile_list + params, r = custom_group_admin_operations + res = r.json() + group_name = { + 'groups': [res['name']] + } + template, r = run_api.user_add_group(user_id=0, groups=group_name) if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: test_assert.status(r, 403) elif run_api.user_type == USER_TYPE[""admin""]: - test_assert.status(r, 200) + test_assert.status(r, 404) " -/profile/rest/get/,fetching list of profiles using invalid token,,"{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_profile_list_with_invalid_token(invalid_exec_api): +/user/rest/add-group/{id}/,"requesting to add user to existing group using valid id.Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ +user_id = ""valid"" , +group_names = ""valid_group_list"" +}","{ +""status"" : 403 / 201, +""response"" : success +}","def test_user_add_group(run_api, user_add_group): """""" - Fetch list of all profiles with invalid token + Adding multiple users into group """""" - r = invalid_exec_api.profile_list() - res = r.json() - test_assert.status(r, 401) - assert res['detail'] == ""Invalid token."" + r = user_add_group + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 201) " -/profile/rest/get/,fetching list of profiles without authorization,,"{ +/user/rest/add-group/{id}/,requesting to add user to existing group using valid id but without authorization,"{ +user_id = ""valid"" , +group_names = ""valid_group_list"" +}","{ ""status"" : 401, -""message"" : ""Authentication credentials were not provided."" -}","def test_profile_list_without_authorization(anonymous_exec_api): +""message"" : ""Authentication credentials were not provided"" +}","def test_user_add_group_without_authorization(anonymous_exec_api): """""" - Fetch list of all profiles without authorization + Adding user into group without authorization """""" - r = anonymous_exec_api.profile_list() + groups_name = { + 'groups': ['0'] + } + template, r = anonymous_exec_api.user_add_group(user_id=0, groups=groups_name) res = r.json() test_assert.status(r, 401) assert res['detail'] == ""Authentication credentials were not provided."" " -/profile/rest/self/,fetching details of self profile,,"{ -""status"" : 200, -""response"" : Self profile details -}","def test_profile_self(run_api, profile_self): - """""" - Fetching details of self profile - """""" - r = profile_self - res = r.json() - assert res['username'] == run_api.user - test_assert.status(r, 200) -" -/profile/rest/self/,fetching details of self profile with invalid token,,"{ +/user/rest/add-group/{id}/,requesting to add user to 
existing group using valid id but invalid token,"{ +user_id = ""valid"" , +group_names = ""valid_group_list"" +}","{ ""status"" : 401, ""message"" : ""Invalid token"" -}","def test_profile_self_with_invalid_token(invalid_exec_api): +}","def test_user_add_group_with_invalid_token(invalid_exec_api): """""" - Fetching details of self profile with invalid token + Adding user into group with invalid token """""" - r = invalid_exec_api.profile_self() + groups_name = { + 'groups': ['0'] + } + template, r = invalid_exec_api.user_add_group(user_id=0, groups=groups_name) res = r.json() test_assert.status(r, 401) assert res['detail'] == ""Invalid token."" " -/profile/rest/self/,fetching details of self profile without authorization,,"{ -""status"" : 401, -""message"" : ""Authentication credentials were not provided."" -}"," -def test_profile_self_without_authorization(anonymous_exec_api): +/user/rest/change_ownership/,"changing ownership of user where the owner is valid but destination user does not exist. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ + owner = 'colama' + dest_user = 'doesnotexistuser' +}","{ +""status"" : 400, +""message"" : ""Either User owner or dest_user does not exist..."" +} +","def test_user_change_ownership_user_doesnot_exits(run_api): """""" - Fetching details of self profile without authorization + user does not exits """""" - r = anonymous_exec_api.profile_self() - res = r.json() - test_assert.status(r, 401) - assert res['detail'] == ""Authentication credentials were not provided."" + owner = 'colama' + dest_user = 'doesnotexistuser' + r = run_api.user_change_ownership(owner, dest_user) + if run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: + rjson = r.json() + test_assert.status(r, 400) + assert rjson['error'] == f""Either User '{owner}' or '{dest_user}' does not exist..."", ""|> Json %s"" % rjson + elif run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + rjson = r.json() + assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson " -/profile/rest/set_group/{user_id}/,"setting group to profile for invalid group names.Check the user type before performing the operation, only admin user type have the permission to perform such operations. +/user/rest/change_ownership/,"changing ownership of user where the owner and destination user are the same. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
","{ -groups = { - ""add"": [], - ""remove"": [invalid_group_name] -}, -user_id = id -} ","{ + owner = 'colama' + dest_user = 'colama' +}","{ ""status"" : 400, -""message"" : ""Group matching query does not exist"" -}","def test_profile_set_group_invalid_group_name(run_api, admin_exec_api): +""message"" : 'The dest_user and the owner should be different' +} +","def test_user_change_ownership_when_owner_and_dest_user_are_same(run_api): """""" - Set group to profile by invalid group name + user change_ownership when owner and dest user are same """""" - groups = { - ""add"": [], - ""remove"": [""invalid-group-name""] - } - r = admin_exec_api.profile_list() - res = r.json() - profile = random.choice(res) - id = profile['id'] - r = run_api.profile_set_group(user_id=id, params=groups) - if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + owner = 'colama' + dest_user = 'colama' + r = run_api.user_change_ownership(owner, dest_user) + if run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: + rjson = r.json() + test_assert.status(r, 400) + assert rjson['error'] == 'The dest_user and the owner should be different', ""|> Json %s"" % rjson + elif run_api.user_type == USER_TYPE[""non_admin""]: test_assert.status(r, 403) - elif run_api.user_type == USER_TYPE[""admin""]: + rjson = r.json() + assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson" +/user/rest/change_ownership/,"changing ownership of user where the destination user does not have right over the owner. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ + owner = 'colama' + dest_user = 'manager' +}","{ +""status"" : 400, +""message"" : ""'manager' as a Manager user, does not have right over 'colama' or 'manager'"" +} +","def test_user_change_owner_doesnot_have_right(skip_if_admin, run_api): + """""" + user does not have right over user + """""" + owner = 'colama' + dest_user = 'manager' + r = run_api.user_change_ownership(owner, dest_user) + if run_api.user_type == USER_TYPE[""manager""]: rjson = r.json() test_assert.status(r, 400) - assert rjson['error'] == ""Group matching query does not exist."" + assert rjson['error'] == ""'manager' as a Manager user, does not have right over 'colama' or 'manager'"", ""|> Json %s"" % rjson + elif run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + rjson = r.json() + assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson " -/profile/rest/set_group/{user_id}/,"setting group to profile for invalid ID. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +/user/rest/change_ownership/,"changing ownership of a user by a manager , where the manager does not have rights over the users. Check the user type before performing the operation, only admin user type have the permission to perform such operations. ","{ -groups = { - ""add"": [], - ""remove"": ""valid_group_name"" -}, -user_id = 0 -} ","{ +owner = ""vivekt"" +dest_user = ""manager"" +}","{ ""status"" : 400, -""response"" : failure -}","def test_profile_set_group_invalid_user_id(run_api): +""message"":""Manager doesn't have full right over the user. 
Make sure 'vivekt' doesn't have any deployment on the server that the 'manager' user as Manager doesn't handle"" +}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_user_change_ownership_manager_does_not_have_deployment_server(skip_if_not_manager, run_api, custom_lib_non_admin_operations): """""" - Set group to profile by invalid user id + To test user_change_ownership endpoint when manager does not have full rights over the user """""" - groups = { - ""add"": [], - ""remove"": [random.choice(GROUPS)] - } - r = run_api.profile_set_group(user_id=0, params=groups) + _ = custom_lib_non_admin_operations + owner = ""vivekt"" + dest_user = ""manager"" + res = run_api.user_change_ownership(owner, dest_user) + test_assert.status(res, 400) + rjson = res.json() + assert rjson['error'] == ""Manager doesn't have full right over the user. Make sure 'vivekt' doesn't have any deployment on the server that the 'manager' user as Manager doesn't handle"", ""|> Json %s"" % rjson +" +/user/rest/detail/{id},"fetching the details of user. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ +""status"" : 403 / 200 +}","def test_user_details(run_api, user_details): + """""" + Fetching the Details of User + """""" + params, r = user_details + res = r.json() if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: test_assert.status(r, 403) elif run_api.user_type == USER_TYPE[""admin""]: - test_assert.status(r, 400) + test_assert.status(params, res, ""user_details"") + test_assert.status(r, 200) " -/profile/rest/set_group/{user_id}/,"setting group to profile for valid User ID and valid group names.Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
-",,"{ -""status"" : 201, -""response"" : success -}","def test_profile_set_group(profile_set_group, run_api): +/user/rest/detail/{id},fetching the details of user without authorization,,"{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_user_detail_without_token(anonymous_exec_api): + """""" + Fetching the user details without token + """""" + params, result = anonymous_exec_api.user_details() + r = result.json() + test_assert.status(result, 401) + assert r['detail'] == ""Authentication credentials were not provided."" +" +/user/rest/detail/{id},"fetching the details of user using valid id. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ +user_id = valid_user_id +}","{ +""status"" : 403 / 200 +}","def test_user_detail_with_valid_id(run_api): """""" - Set group to profile + Fetching the Details of User with valid id """""" - r = profile_set_group if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: - test_assert.status(r, 403) + params, res = run_api.user_details(id=run_api.user_id) + test_assert.status(res, 403) + elif run_api.user_type == USER_TYPE[""admin""]: - test_assert.status(r, 201) + params, res = run_api.user_details(id=run_api.user_id) + test_assert.status(res, 200) + " -/profile/rest/set_group/{user_id}/,setting group to profile using invalid token,"{ -groups = { - ""add"": [], - ""remove"": ""valid_group_name"" -}, -user_id = id -} ","{ +/user/rest/detail/{id},fetching the details of user using invalid token,,"{ ""status"" : 401, ""message"" : ""Invalid token"" -}","def test_profile_set_group_with_invalid_token(invalid_exec_api): +}","def test_user_details_with_invalid_token(invalid_exec_api): """""" - Set group to profile with invalid token + Fetching the details of the user using invalid token """""" - groups = { - ""add"": [], - ""remove"": [""valid-group-name""] - } - r = 
invalid_exec_api.profile_set_group(user_id = id, params = groups) - res = r.json() - test_assert.status(r, 401) - assert res['detail'] == 'Invalid token.' + params, result = invalid_exec_api.user_details() + r = result.json() + test_assert.status(result, 401) + assert r['detail'] == ""Invalid token."" " -/profile/rest/set_group/{user_id}/,setting group to profile without authorization,"{ -groups = { - ""add"": [], - ""remove"": ""valid_group_name"" -}, -user_id = id -} ","{ -""status"" : 401, -""message"" : ""Authentication credentials were not provided."" -}","def test_profile_set_group_without_authorization(anonymous_exec_api): +/user/rest/detail/{id},fetching the details of user using invalid id,"{ +user_id = ""invalid"" +}","{ +""status"" : 404 +}","def test_user_detail_with_invalid_id(run_api): """""" - Set group to profile without authorization + Fetching the details using invalid id + """""" - groups = { - ""add"": [], - ""remove"": [""valid-group-name""] - } - r = anonymous_exec_api.profile_set_group(user_id=id, params=groups) - res = r.json() - test_assert.status(r, 401) - assert res['detail'] == 'Authentication credentials were not provided.' + params, r = run_api.user_details(id=""invalid"") + test_assert.status(r, 404) + " -/rtask/rest/children/{UUID}/,"fetching the list of children jobs using valid data +/user/rest/list/,"fetching the list of users. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
",,"{ -""status"" : 200, -""response"" : Children Task listed -}","def test_rtask_list_children(rtask_list_children): +""status"" : 200/ 403, +""response"" : fetched list of users +}","def test_user_list(run_api, user_list): """""" - Fetching the List of children of a island deploy job + Fetching the List of User """""" - params, r = rtask_list_children - rjson = r.json() - test_assert.status(r, 200) - assert rjson[""count""] == len(params[""machines""][""add""]) - assert rjson[""results""][0][""type_name""] == ""Deploy"" + r = user_list + if run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 200) + elif run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) " -/rtask/rest/children/{UUID}/,fetching the list of children jobs when requested with invalid token,"{ -uuid = ""valid_uuid"" -}","{ +/user/rest/list/,fetching the list of users without authorization,,"{ +""status"" : 401, +""message"" : ""Authentication credentials were not provided"" +}","def test_user_list_without_token(anonymous_exec_api): + """""" + Fetch group list with unauthorized + """""" + r = anonymous_exec_api.group_list({}) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Authentication credentials were not provided."" +" +/user/rest/list/,fetching the list of users with invalid token,,"{ ""status"" : 401, ""message"" : ""Invalid token"" -}","def test_rtask_list_children_with_invalid_token(invalid_exec_api): +}","def test_user_list_with_invalid_token(invalid_exec_api): """""" - Fetching the List of childrens of a job with invalid token + Fetch group list with invalid token """""" - r = invalid_exec_api.rtask_list_children(""invalid-uuid"") + r = invalid_exec_api.group_list({}) res = r.json() test_assert.status(r, 401) assert res['detail'] == ""Invalid token."" " -/rtask/rest/children/{UUID}/,fetching the list of children jobs when requested with invalid UUID,"{ -uuid = ""invalid_uuid"" -}","{ -""status"" : 400 
/ 404, -""response"" : Bad request -}","def test_rtask_list_children_invalid_uuid(run_api): +/user/rest/list/,"fetching the list of users using the search param . Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ +""status"" : 200/ 403, +""response"" : fetched list of users when search param provided +}","def test_user_list_with_search_params(run_api, user_list): """""" - Fetching the List of childrens of a job having invalid uuid + user list with search params """""" - r = run_api.rtask_list_children(""invalid-uuid"") - status_code = r.status_code - assert status_code in [400, 404] + if run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: + result = run_api.user_list(params={""search"": run_api.user}) + test_assert.status(result, 200) + elif run_api.user_type == USER_TYPE[""non_admin""]: + result = run_api.user_list(params={""search"": run_api.user}) + test_assert.status(result, 403) " -/rtask/rest/children/{UUID}/,fetching the list of children jobs without authorization,"{ -uuid = ""valid_uuid"" -}","{ -""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -}","def test_rtask_list_children_without_authorization(anonymous_exec_api): +/user/rest/list/,"fetching the list of users using the group_id parameter. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+",,"{ +""status"" : 200/ 403, +""response"" : fetched list of users with the provided group_id +} ","def test_user_list_with_group_id(run_api, admin_exec_api): """""" - Fetching the List of childrens of a job without authorization + Fetch user list in a group with group-id """""" - r = anonymous_exec_api.rtask_list_children(""invalid-uuid"") - res = r.json() - test_assert.status(r, 401) - assert res['detail'] == ""Authentication credentials were not provided."" + params, r = admin_exec_api.group_add() + group_uid = r.json()[""id""] + if run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: + res = run_api.user_list(params={""group_id"": group_uid}) + test_assert.status(res, 200) + elif run_api.user_type == USER_TYPE[""non_admin""]: + res = run_api.user_list(params={""group_id"": group_uid}) + test_assert.status(res, 403) + r = admin_exec_api.group_delete(group_uid) " -/rtask/rest/delete/{UUID}/,"deleting task of invalid UUID. Check the user type before performing the operation, only admin user type have the permission to perform such operations. -","{ -uuid = ""invalid_uuid"" -}","{ -""status"" : 400 / 404, -""response"" : Bad request -}","def test_rtask_delete_invalid_uuid(run_api): +/user/rest/list/,"fetching the list of users using filters. Check the user type before performing the operation. 
+",,"{ +""status"" : 200/ 403, +} ","@pytest.mark.xfail +def test_user_list_filter(skip_if_invalid_groups, run_api, user_list): """""" - Deleting the task with invalid token + Fetching the List of User by filtering """""" - r = run_api.rtask_delete(""invalid-uuid"") - if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + groups = skip_if_invalid_groups + group_filter = {""group_id"": choice(groups), ""page_size"": 10} + exclude_group_filter = {""exclude_group_id"": choice(groups), ""page_size"": 10} + is_manager_filter = {""is_manager"": choice([True, False]), ""page_size"": 10} + r = user_list + if run_api.user_type == USER_TYPE[""non_admin""]: test_assert.status(r, 403) - elif run_api.user_type == USER_TYPE[""admin""]: - status_code = r.status_code - assert status_code in [404, 400] + elif run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: + # expected result of users with exclude group filter + exclude_group_ids = [] + for i in r.json()[""results""]: + group_ids = [] + for j in i.get(""groups""): + group_ids.append(j.get(""id"")) + if exclude_group_filter.get(""exclude_group_id"") in group_ids and len(group_ids) - 1 >= 1: + exclude_group_ids.append(i.get(""id"")) + # expected result of users with is_manager filter + manager_check = [] + for i in r.json()[""results""]: + is_manager = [] + for j in i.get(""groups""): + is_manager.append(j.get(""is_manager"")) + if is_manager_filter.get(""is_manager"") is True and is_manager_filter.get(""is_manager"") in is_manager: + manager_check.append(True) + elif is_manager_filter.get(""is_manager"") is False and True not in is_manager: + manager_check.append(False) + exp_res = { + 0: [group_filter.get(""group_id"") for i in r.json()[""results""] for j in i.get(""groups"") if j.get(""id"") == group_filter.get(""group_id"")], + 1: exclude_group_ids, + 2: manager_check + } + filters = [group_filter, exclude_group_filter, is_manager_filter] + for filter in range(len(filters)): + params = 
filters[filter] + r = run_api.user_list(params) + # check for valid response data with the filter parameters + if r.json()[""count""] != len(exp_res[filter]): + logging.error(f""error in filter: {filters[filter]}, the list of expected result for the filter is: {exp_res[filter]}, and the actual result is {r.json()}"") + assert False + test_assert.status(r, 200) " -/rtask/rest/delete/{UUID}/,"deleting task of valid UUID.Check the user type before performing the operation, only admin user type have the permission to perform such operations. +/user/rest/list/,"fetching the list of users setting the is_manager param set to True. Check the user type before performing the operation, only admin user type have the permission to perform such operations. ",,"{ -""status"" : 403 / 204, -""response"" : Task deleted successfully -}","def test_rtask_delete(run_api, rtask_delete): +""status"" : 200/ 403, +} ","def test_user_list_is_manager_is_true(run_api): """""" - Deleting the task + fetch user list when is_manager is true """""" - r = rtask_delete - if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + params = { + 'is_manager': True + } + r = run_api.user_list(params) + rjson = r.json() + if run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 200) + for users in rjson['results']: + is_manager = [group[""is_manager""] for group in users['groups']] + assert True in is_manager, ""The error is %s"" % rjson + elif run_api.user_type == USER_TYPE[""non_admin""]: test_assert.status(r, 403) - elif run_api.user_type == USER_TYPE[""admin""]: - test_assert.status(r, 204) + assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson " -/rtask/rest/delete/{UUID}/,deleting task when requested with invalid token,"{ -uuid = ""valid_uuid"" -}","{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_rtask_delete_with_invalid_token(invalid_exec_api): +/user/rest/list/,"fetching 
the list of users setting the is_manager param set to False. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ +""status"" : 200/ 403, +} ","def test_user_list_is_manager_is_false(run_api): """""" - Deleting the task with invalid token + fetch user list when is_manager is false """""" - r = invalid_exec_api.rtask_delete(""valid-uuid"") - res = r.json() - test_assert.status(r, 401) - assert res['detail'] == ""Invalid token."" + params = { + 'is_manager': False + } + r = run_api.user_list(params) + rjson = r.json() + if run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 200) + for users in rjson['results']: + is_manager = [group[""is_manager""] for group in users['groups']] + assert False in is_manager, ""The error is %s"" % rjson + elif run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson " -/rtask/rest/delete/{UUID}/,deleting task without authorization,"{ -uuid = ""valid_uuid"" -}","{ +/user/rest/logout,requesting to logout user without authorization,,"{ ""status"" : 401, ""message"" : ""Authentication credentials were not provided"" -}","def test_rtask_delete_without_authorization(anonymous_exec_api): +}","def test_user_logout_without_authorization(anonymous_exec_api): """""" - Deleting the task without authorization + Logout the user """""" - r = anonymous_exec_api.rtask_delete(""valid-uuid"") - res = r.json() + r = anonymous_exec_api.user_logout() + result = r.json() test_assert.status(r, 401) - assert res['detail'] == ""Authentication credentials were not provided."" + assert result['detail'] == ""Authentication credentials were not provided."" " -/rtask/rest/detail/{UUID}/,getting details of task for invalid uuid,"{ -uuid = ""invalid_uuid"" -}","{ -""status"" : 400, -""response"" : Bad request 
-}","@pytest.mark.skip(reason=""Skipping this test because it is returning 404 in place of 400"") -def test_rtask_details_invalid_uuid(run_api): +/user/rest/logout,requesting to logout user using invalid token,,"{ +""status"" : 401, +""message"" : ""Invalid token"" +}","def test_user_logout_with_invalid_token(invalid_exec_api): """""" - Getting details of Task by providing invalid uuid + Logout the user """""" - r = run_api.rtask_details(""invalid-uuid"") - # res = r.json() - test_assert.status(r, 400) + res = invalid_exec_api.user_logout() + result = res.json() + test_assert.status(res, 401) + assert result['detail'] == ""Invalid token."" " -/rtask/rest/detail/{UUID}/,getting details of task for valid uuid,,"{ +/user/rest/logout,requesting to logout user,,"{ ""status"" : 200, -""response"" : Details provided -}","def test_rtask_details(rtask_details): +""response"" : user logged out successfully +}","def test_user_logout(user_logout): """""" - Getting details of Specific Task + Logout the user """""" - params, r = rtask_details - res = r.json() - test_assert.status(res, params, ""rtask_details"", ""server"") + r = user_logout test_assert.status(r, 200) " -/rtask/rest/detail/{UUID}/,getting details of task with invalid token,"{ -uuid = ""valid_uuid"" -}","{ +/user/rest/self/,fetching the data of logged in user without authorization,,"{ ""status"" : 401, -""message"" : ""Invalid token"" -}","def test_rtask_details_with_invalid_token(invalid_exec_api): +""message"" : ""Authentication credentials were not provided"" +}","def test_user_self_without_authorization(anonymous_exec_api): """""" - Getting details of Task with invalid token + Fetching the data of logged in user without authorization """""" - r = invalid_exec_api.rtask_details(""valid-uuid"") - res = r.json() + r = anonymous_exec_api.user_self() + result = r.json() test_assert.status(r, 401) - assert res['detail'] == ""Invalid token."" + assert result['detail'] == ""Authentication credentials were not 
provided."" " -/rtask/rest/detail/{UUID}/,getting details of task without authorization,"{ -uuid = ""valid_uuid"" -}","{ +/user/rest/self/,fetching the data of logged in user using invalid token,,"{ ""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -}","def test_rtask_details_without_authorization(anonymous_exec_api): +""message"" : ""Invalid token"" +}","def test_user_self_with_invalid_token(invalid_exec_api): """""" - Getting details of Task without authorization + Fetching the data of logged in user with invalid token """""" - r = anonymous_exec_api.rtask_details(""valid-uuid"") - res = r.json() + r = invalid_exec_api.user_self() + result = r.json() test_assert.status(r, 401) - assert res['detail'] == ""Authentication credentials were not provided."" + assert result['detail'] == ""Invalid token."" " -/rtask/rest/list/,fetching the list of jobs,,"{ -""status"" : 200, -""response"" : listed jobs -}","def test_rtask_list(rtask_list): +/user/rest/self/,fetching the data of logged in user,,200: should return object of user currently logged in,"def test_user_self(user_self): """""" - Fetching the List of Jobs + Fetching the data of logged in user """""" - r = rtask_list + r = user_self test_assert.status(r, 200) " -/rtask/rest/list/,"fetching the list of jobs when requested with ordering param as ""status""",,"{ -""status"" : 200, -""response"" : listed jobs in ascending order -}","def test_rtask_list_status(rtask_list_status): +audit/rest/list,getting the audit list without authorization,,"{""status"":401, +""message"":""Authentication credentials were not provided."" +}","def test_audit_list_without_authorization(anonymous_exec_api): """""" - Listing the status of rtasks + Audit list without authorization """""" - params, r = rtask_list_status - test_assert.status(r, 200) + r = anonymous_exec_api.audit_list() + result = r.json() + test_assert.status(r, 401) + assert result['detail'] == ""Authentication credentials were not 
provided.""" +ideploy/rest/add-tags,successful deployment operation when equal number of deployed islands and tags provided ,"{""island_list"": [""UUID1"", ""UUID2""], ""tags_list"": [[""tag1""], [""tag2""]]}","{""status"": 201, ""message"": ""Created""}","def test_ideploy_add_tags(ideploy_deploy, run_api): + """""" + ideploy add tags + """""" + p, r = ideploy_deploy + uuid = r.json()['deploy_uuid'] + tag_name = ""test_tag"" + params = { + ""island_list"": [ + uuid + ], + ""tags_list"": [ + [ + tag_name + ] + ] + } + r = run_api.ideploy_add_tag(params) + test_assert.status(r, 201) + island_detail = run_api.ideploy_details(uuid).json() + all_tags = [tag['value'] for tag in island_detail['tags']] + assert tag_name in all_tags, ""|> Json %s"" % island_detail " -/rtask/rest/list/,fetching the list of jobs without authorization,,"{ -""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -}","def test_rtask_list_without_authorization(anonymous_exec_api): +ideploy/rest/add-tags,"providing non-empty island_list and empty tags_list, expecting an error for not enough tags.","{""island_list"": [""UUID1""], ""tags_list"": []}","{""status"": 400, ""message"": ""Not enough tags provided.""}","def test_ideploy_add_tags_empty_island_list(run_api): """""" - Fetching the List of Jobs without authorization + invalid id """""" - r = anonymous_exec_api.rtask_list() - res = r.json() - test_assert.status(r, 401) - assert res['detail'] == ""Authentication credentials were not provided."" + params = {""island_list"": [""UUID1""], + ""tags_list"": [] + } + r = run_api.ideploy_add_tag(params) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""{'island_list': [ErrorDetail(string='This list may not be empty.', code='empty')]}"", ""|> Json %s"" % rjson " -/rtask/rest/list/,fetching the list of jobs when requested with invalid token,,"{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def 
test_rtask_list_with_invalid_token(invalid_exec_api):
+ideploy/rest/add-tags,"providing more number of tags than islands, expecting an error of not enough islands to add tags","{""island_list"": [""UUID1""], ""tags_list"": [[""tag1""], [""tag2""]]}","{""status"": 400, ""message"": ""Not enough islands to add tags to.""}","def test_ideploy_add_tags_more_tag_count(run_api):
     """"""
-    Fetching the List of Jobs with invalid token
+    ideploy add tags more than island count
     """"""
-    r = invalid_exec_api.rtask_list()
-    res = r.json()
-    test_assert.status(r, 401)
-    assert res['detail'] == ""Invalid token.""
+    tag_name = ""test_tag""
+    params = {
+        ""island_list"": [
+            ""invalid""
+        ],
+        ""tags_list"": [
+            [
+                tag_name
+            ],
+            [
+                tag_name
+            ]
+        ]
+    }
+    r = run_api.ideploy_add_tag(params)
+    test_assert.status(r, 400)
+    rjson = r.json()
+    assert rjson['error'] == ""Not enough islands to add tags to."", ""|> Json %s"" % rjson
 "
-/rtask/rest/list/,fetching the list of jobs with added filters,,"{
-""status"" : 200,
-""response"" : listed jobs
-}","PARAMETERS = [
-    {""page"": 1, ""page_size"": 5},
-    {""search"": ""finished""},
-    {""ordering"": ""mtime""},
-    {""ordering"": ""-mtime""},
-    {""ordering"": ""status""},
-    {""ordering"": ""-status""},
-    {""ordering"": ""job_type""},
-    {""ordering"": ""-job_type""}
-]
-
-@pytest.mark.parametrize(""filter"", PARAMETERS)
-def test_rtask_list_with_filter(run_api, filter):
+ideploy/rest/add-tags,providing more number of islands than tags and eventually expecting an error of not enough tags provided,"{""island_list"": [""UUID1"", ""UUID2"", ""UUID3""], ""tags_list"": [[""tag1""], [""tag2""]]}","{""status"": 400, ""message"": ""Not enough tags provided.""}","def test_ideploy_add_tags_more_tag_count(run_api):
     """"""
-    Fetching the List of Jobs based on filter
+    ideploy add tags more than island count
     """"""
-    r = run_api.rtask_list(filter)
-    test_assert.status(r, 200)
+    tag_name = ""test_tag""
+    params = {
+        ""island_list"": [
+            ""invalid""
+        ],
+ ""tags_list"": [ + [ + tag_name + ], + [ + tag_name + ] + ] + } + r = run_api.ideploy_add_tag(params) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""Not enough islands to add tags to."", ""|> Json %s"" % rjson + " -​/rtask​/rest​/rlist​/,fetching the list of remote jobs / tasks using valid data,,"{ -""status"" : 200, -""response"" : Remote Task listed -}","def test_rtask_rlist(rtask_rlist): +ideploy/rest/add-tags,"providing invalid UUID in island_list, expecting an error for invalid island UUID.","{""island_list"": [""invalid_UUID""], ""tags_list"": [[""tag1""]]}","{""status"": 400, ""message"": ""Invalid island UUID.""}"," +def test_ideploy_add_tags_invalid_island_id(run_api): """""" - Fetching the List of Jobs + invalid id """""" - r = rtask_rlist - test_assert.status(r, 200) + params = { + ""island_list"": [ + ""inUUID"" + ], + ""tags_list"": [ + [ + ""tag_name"" + ] + ] + } + r = run_api.ideploy_add_tag(params) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""DeployedIsland matching query does not exist."", ""|> Json %s"" % rjson " -​/rtask​/rest​/rlist​/,fetching the list of remote tasks when requested with invalid token,,"{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_rtask_rlist_with_invalid_token(invalid_exec_api): +ideploy/rest/add-tags,"providing empty island_list and non-empty tags_list, expecting an error for not enough islands.","{""island_list"": [], ""tags_list"": [[""tag1""]]}","{""status"": 400, ""message"": ""Not enough islands to add tags to.""}","def test_ideploy_add_tags_empty_island_list(run_api): """""" - Fetching the List of Jobs with invalid token + invalid id """""" - r = invalid_exec_api.rtask_rlist() - res = r.json() - test_assert.status(r, 401) - assert res['detail'] == ""Invalid token."" + params = { + ""island_list"": [ + ], + ""tags_list"": [ + [ + ""tag_name"" + ] + ] + } + r = run_api.ideploy_add_tag(params) + test_assert.status(r, 400) + rjson = 
r.json() + assert rjson['error'] == ""{'island_list': [ErrorDetail(string='This list may not be empty.', code='empty')]}"", ""|> Json %s"" % rjson " -​/rtask​/rest​/rlist​/,fetching the list of remote tasks with customized filters,,"{ -""status"" : 200, -""response"" : Filtered remote task listed -}","@pytest.mark.skip(reason=""cannot validate the remote tasks"") -def test_rtask_rlist_filter(run_api): +ideploy/rest/add-tags,"empty input data, expecting an error for missing required fields. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{""status"": 400, ""message"": ""Input data is missing required 'island_list' and 'tags_list' keys.""}","def test_ideploy_change_ownership_with_missing_fields(run_api): """""" - Fetching the List of Jobs by adding filters + change ownership with missing 'owner' field """""" - servers = [server[""hostname""] for server in run_api.server_list().json()[""results""]] - random_server = randint(0, 2) - owner_filter = {""user"": choice([1, 2, 3])} - task_for_filter = {""task_for"": servers[random_server]} - task_on_filter = {""task_on"": servers[random_server]} - status_filter = {""status"": choice(['created', 'delegated', 'started', 'finished', - 'failed', 'cancel', 'cancelling', 'cancelled'])} - search_filter = {""search"": choice([""Refresh"", ""BuildISOList"", ""DeleteRepoStoreFiles"", - DEFAULT_ADMIN_ACCOUNT['user'], DEFAULT_NON_ADMIN_ACCOUNT['user'], DEFAULT_MANAGER_ACCOUNT['user'], ""main"", ""mh"", ""mh-2""])} - filters = [owner_filter, task_for_filter, task_on_filter, status_filter, search_filter] - for filter in range(len(filters)): - r = run_api.rtask_rlist(filters[filter]) - test_assert.status(r, 200) + params = { + ""deployment_uuids"": [ + ""invalid"" + ], + ""dest_user"": ""manager"" + } + r = run_api.ideploy_change_ownership(params) + if run_api.user_type == USER_TYPE['non_admin']: + test_assert.status(r, 403) + rjson = r.json() + assert 
rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson + else: + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""owner or dest_user cannot be null or empty"", ""|> Json %s"" % rjson " -​/rtask​/rest​/rlist​/,fetching the list of remote tasks with filters,,"{ -""status"" : 200, -""response"" : Remote Task listed -}","PARAMETERS = [ - {""page"": 1, ""page_size"": 5}, - {""search"": ""finished""}, - {""ordering"": ""mtime""}, - {""ordering"": ""-mtime""}, - {""ordering"": ""status""}, - {""ordering"": ""-status""}, - {""ordering"": ""job_type""}, - {""ordering"": ""-job_type""} -] - -@pytest.mark.parametrize(""filter"", PARAMETERS) -def test_rtask_rlist_with_filter(run_api, filter): +ideploy/rest/change_ownership,Successful change of ownership from one user to another where both users exist and the requester has the necessary permissions,"{ + ""deployment_uuids"": [ + deploy_id + ], + ""owner"": ""vivekt"", + ""dest_user"": ""manager"" + }","{""status"": 200, ""message"": ""Operation performed successfully without any error""}"," +@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_ideploy_change_ownership(skip_if_non_admin, custom_ilib_non_admin_operations, run_api): """""" - Fetching the List of Jobs based on filter + Successful change of ownership from one user to another """""" - r = run_api.rtask_rlist(filter) + deploy_id = custom_ilib_non_admin_operations + params = { + ""deployment_uuids"": [ + deploy_id + ], + ""owner"": ""vivekt"", + ""dest_user"": ""manager"" + } + r = run_api.ideploy_change_ownership(params) test_assert.status(r, 200) + island_detail = run_api.ideploy_details(deploy_id).json() + assert island_detail['island']['owner'] == 'manager', ""|> Json %s"" % island_detail + ilib_id = island_detail['island']['deploy_for']['uuid'] + run_api.ideploy_delete(deploy_id) + run_api.ilibrary_delete(ilib_id) + " 
-/server/rest/backup_complete/,"creating a backup complete token for the server using invalid token. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +ideploy/rest/change_ownership,"Partial success in changing ownership where some UUIDs fail. Check the user type before performing the operation, only admin user type have the permission to perform such operations. ","{ - 'token': 'invalid' -}","{ - ""status"" : 400, - ""message"" : ""Invalid token"" -}","def test_server_backup_complete_with_invalid_token(run_api): + ""deployment_uuids"": [ + ""invalid"" + ], + ""owner"": ""vivekt"", + ""dest_user"": ""manager"" + }","{""status"": 207, ""message"": ""These objects failed to change their ownership: [\""invalid_UUID\""]""}","def test_ideploy_change_ownership_invalid_id(skip_if_non_admin, run_api): """""" - testing server backup_complete using invalid token + Partial success in changing ownership where some UUIDs fail. """""" params = { - 'token': 'invalid' + ""deployment_uuids"": [ + ""invalid"" + ], + ""owner"": ""vivekt"", + ""dest_user"": ""manager"" } - r = run_api.server_backup_complete(params) - rjson = r.json() - if run_api.user_type == 'admin': - test_assert.status(r, 400) - assert rjson['result'] == 'FAILURE', rjson - assert rjson['error'] == 'Invalid Token', rjson - else: + r = run_api.ideploy_change_ownership(params) + if run_api.user_type == USER_TYPE['non_admin']: test_assert.status(r, 403) - assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson + rjson = r.json() + assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson + else: + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""The count of provided UUIDs doesn't match with the count of existing Deployments. 
Make sure that the provided UUIDs are valid, the deployment(s) is/are not a part of any Island, they belong to the 'vivekt' user and are for one category, either DeployedMachine or DeployedIsland"", ""|> Json %s"" % rjson
+ "
-/server/rest/backup_manifest/,"back-up manifest api operation of server using empty string token. check the user type before performing the operation, only admin user type have the permission to perform such operations. 
- ","{
-""token"" :""""
+ideploy/rest/change_ownership,changing ownership of an invalid deployed island from non-admin by an admin user ,"{
+    ""deployment_uuids"": [""invalid""],
+    ""owner"",
+    ""dest_user"",
 }","{
-""status"" : 400,
-""message"" : ""Token required""
-}","@pytest.mark.skip(""Skipping this because it returns status code :- 500 "")
-def test_server_backup_manifest_empty_token(run_api):
+""status"" : 400,
+""message"":""Make sure that the provided UUIDs are valid""
+}","def test_ideploy_change_ownership_invalid_uuid(skip_if_not_admin, non_admin_exec_api, run_api):
     """"""
-    testing backup-manifest api using empty string token
+    To change ownership of invalid deployed island from non-admin user to admin user by admin
     """"""
-    params = {""token"": """"}
-    r = run_api.server_backup_manifest(params)
-    rjson = r.json()
-    if run_api.user_type == 'admin':
-        test_assert.status(r, 400)
-        assert rjson['error'] == 'Token Required', ""|> Json %s"" % rjson
-    else:
-        test_assert.status(r, 403)
-        assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson
+    params = {
+        ""deployment_uuids"": [""invalid""],
+        ""owner"": non_admin_exec_api.user,
+        ""dest_user"": run_api.user
+    }
+    res = run_api.ideploy_change_ownership(params)
+    rjson = res.json()
+    test_assert.status(res, 400)
+    assert ""Make sure that the provided UUIDs are valid"" in rjson[""error""], rjson
 "
-/server/rest/backup_manifest/,"back-up manifest api operation of server using invalid token. 
check the user type before performing the operation, only admin user type have the permission to perform such operations. - ","{ -""token"" :""invalid"" +ideploy/rest/change_ownership,chaning ownership from admin to non-admin of an deployed island machine by an admin user ,"{ + ""deployment_uuids"" + ""owner"" + ""dest_user"" }","{ - ""status"" : 400, - ""message"" : ""Token Invalid"" -}","def test_server_backup_manifest_invalid_token(run_api): +""status"" : 400, +""message"":""The provided UUIDs might belong to the DeployedMachine. Trigger the correct API"" +}","def test_ideploy_change_ownership_with_deployed_machine_uuid(skip_if_not_admin, deploy_image, non_admin_exec_api, run_api): """""" - testing backup-manifest api using invalid token + To change ownership of deployed machine from admin user to non-admin user by admin """""" - params = {""token"": ""invalid""} - r = run_api.server_backup_manifest(params) - rjson = r.json() - if run_api.user_type == 'admin': - test_assert.status(r, 400) - assert rjson['reason'] == 'Invalid Token', ""|> Json %s"" % rjson - else: - test_assert.status(r, 403) - assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson + template, r = deploy_image + deploy_id = r.json()[""uuid""] + params = { + ""deployment_uuids"": [deploy_id], + ""owner"": run_api.user, + ""dest_user"": non_admin_exec_api.user + } + res = run_api.ideploy_change_ownership(params) + rjson = res.json() + test_assert.status(res, 400) + assert f""The provided UUIDs ['{deploy_id}'] might belong to the DeployedMachine. Trigger the correct API"" in rjson[""error""], rjson " -/server/rest/backup/,"creating a backup token for the server. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
-",,"{ - ""status"" : 201, - ""response"" : success -}"," -def test_server_backup_token(run_api): +ideploy/rest/change_ownership,"Changing ownership with invalid deployment UUIDs format. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ + ""deployment_uuids"": + {}, + ""owner"": ""vivekt"", + ""dest_user"": ""manager"" + }","{""status"": 400, ""message"": ""Error message explaining invalid input format for UUIDs""}","def test_ideploy_change_ownership_with_invalid_data_type(run_api): """""" - create a backup token for the server + invalid input format for changing ownership """""" - r = run_api.server_backup_token() - rjson = r.json() - if run_api.user_type == 'admin': - test_assert.status(r, 201) - assert ""token"" in rjson, rjson - else: + params = { + ""deployment_uuids"": + {}, + ""owner"": ""vivekt"", + ""dest_user"": ""manager"" + } + r = run_api.ideploy_change_ownership(params) + if run_api.user_type == USER_TYPE['non_admin']: test_assert.status(r, 403) - assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson + rjson = r.json() + assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson + else: + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""deployment_uuids cannot be null or empty"", ""|> Json %s"" % rjson " -/server/rest/set_commitable_ram/,"setting negative value to commitable_ram _percent for server. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +ideploy/rest/change_ownership,"Attempting to change ownership with an empty list of UUIDs. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
","{ - 'commitable_ram_percent': -1 -}","{ -""status"" : 400, -""message"" : ""commitable_ram_percent should be greater than 0"" -} -","def test_server_set_commmitable_ram_commitable_ram_percent_is_negative(run_api): + ""deployment_uuids"": [], + ""owner"": ""vivekt"", + ""dest_user"": ""manager"" +}","{""status"": 400, ""message"": ""please provide list of uuids""}","def test_ideploy_change_ownership_empty_list_uuid(run_api): """""" - server set commmitable ram is negative + change ownership with an empty list of UUIDs. """""" params = { - 'commitable_ram_percent': -1 + ""deployment_uuids"": [], + ""owner"": ""vivekt"", + ""dest_user"": ""manager"" } - r = run_api.server_set_commmitable_ram('invalid', params) - if run_api.user_type != 'admin': + r = run_api.ideploy_change_ownership(params) + if run_api.user_type == USER_TYPE['non_admin']: test_assert.status(r, 403) rjson = r.json() - rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson + assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson else: test_assert.status(r, 400) rjson = r.json() - rjson['error'] == ""commitable_ram_percent should be greater than 0"", ""|> json %s"" % rjson -" -/server/rest/set_commitable_ram/,"setting invalid server id to commitable_ram_percent for a server. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
-","{ - 'commitable_ram_percent': 100 -}","{ -""status"" : 404, -""message"" : ""Sever not found"" -} -","def test_server_set_commmitable_invalid_id(run_api): + assert rjson['error'] == 'please provide list of uuids', ""|> %s"" % rjson" +ideploy/rest/change_ownership,Attempting to change ownership where the owner does not exist.,"{""deployment_UUIDs"": [""UUID1"", ""UUID2""], ""owner"": ""nonexistentowner"", ""dest_user"": ""newowner""}","{""status"": 400, ""message"": ""Either User 'nonexistentowner' or 'newowner' does not exist...""}","def test_ideploy_change_ownership_for_not_existing_owner(skip_if_not_admin, run_api, custom_lib_non_admin_operations): """""" - server invalid server id + To change ownership of deployed machine if one of the user do not exit """""" params = { - 'commitable_ram_percent': 100 + ""deployment_uuids"": ['invalid'], + ""owner"":""non-exiting-user"", + ""dest_user"": ""manager"" } - r = run_api.server_set_commmitable_ram('invalid', params) - if run_api.user_type != 'admin': + r = run_api.ideploy_change_ownership(params) + if run_api.user_type == USER_TYPE['non_admin']: test_assert.status(r, 403) rjson = r.json() - rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson + assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson else: - test_assert.status(r, 404) + test_assert.status(r, 400) rjson = r.json() - rjson['error'] == ""Server not found"", ""|> json %s"" % rjson + assert rjson['error'] == ""Owner does not exist..."", ""|> Json %s"" % rjson " -/server/rest/set_commitable_ram/,"setting invalid server id to commitable_ram_percent for a server. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
-",{},"{ -""status"" : 400, -""message"" : ""commitable_ram_percent is required"" -} -","def test_server_set_commmitable_without_params(run_api): +ideploy/rest/change_ownership,"attempting to change ownership where the destination user does not exist check. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +","{ + ""deployment_uuids"": ['invalid'], + ""owner"": ""colama"", + ""dest_user"": ""non-exiting-user"" + }","{""status"": 400, ""message"": ""Either User 'currentowner' or 'nonexistentuser' does not exist...""}","def test_ideploy_change_ownership_user_does_not_exits(run_api): """""" - server with set commmitable ram + One of the user does not exits """""" - r = run_api.server_set_commmitable_ram('invalid', {}) - if run_api.user_type != 'admin': + params = { + ""deployment_uuids"": ['invalid'], + ""owner"": ""colama"", + ""dest_user"": ""non-exiting-user"" + } + r = run_api.ideploy_change_ownership(params) + if run_api.user_type == USER_TYPE['non_admin']: test_assert.status(r, 403) rjson = r.json() - rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson + assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson else: test_assert.status(r, 400) rjson = r.json() - rjson['error'] == ""commitable_ram_percent is required"", ""|> json %s"" % rjson + assert rjson['error'] == ""Either User 'colama' or 'non-exiting-user' does not exist..."", ""|> Json %s"" % rjson " -/server/rest/set_commitable_ram/,"setting server id to commitable_ram_percent which is greater than 100 for a server. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +ideploy/rest/change_ownership,"Attempting to change ownership when the owner and destination user are the same. 
Check the user type before performing the operation, only admin user type have the permission to perform such operations. ","{ - 'commitable_ram_percent': 150 -}","{ -""status"" : 400, -""message"" : ""commitable_ram_percent should be less than or equal to 100"" -} -"," -def test_server_set_commmitable_ram_commitable_ram_percent_is_greater_than_100(run_api): + ""deployment_UUIDs"": [ + ""string"" + ], + ""owner"": ""colama"", + ""dest_user"": ""colama"" + }","{""status"": 400, ""message"": ""The dest_user and the owner should be different""}","def test_ideploy_change_ownership_same_owner_and_dest_owner(run_api): """""" - server set commmitable ram is greater than 100 + ideploy change ownership """""" params = { - 'commitable_ram_percent': 150 + ""deployment_UUIDs"": [ + ""string"" + ], + ""owner"": ""colama"", + ""dest_user"": ""colama"" } - r = run_api.server_set_commmitable_ram('invalid', params) - if run_api.user_type != 'admin': + r = run_api.ideploy_change_ownership(params) + if run_api.user_type == USER_TYPE['non_admin']: test_assert.status(r, 403) rjson = r.json() - rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson + assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson else: test_assert.status(r, 400) rjson = r.json() - rjson['error'] == ""commitable_ram_percent should be less than or equal to 100"", ""|> json %s"" % rjson + assert rjson['error'] == ""The dest_user and the owner should be different"", ""|> Json %s"" % rjson" +ideploy/rest/deploy_filter_fields/,successful filtering of the fields of deployed island machine ,,"{ + ""status"":200, + ""response"":list of filters +}","def test_ideploy_deploy_filter_fields(run_api): + """""" + ideploy deploy filter fields + """""" + r = run_api.ideploy_filter_fields() + test_assert.status(r, 200) " -/server/rest/test_connection/,testing the connection to the server,"{ - ""ip"" - ""port"": 22, - ""username"", - 
""password"", +license/rest/licenses_check,checking the license when day params is negative,,"{ + ""result"": ""FAILURE"", + ""message"": ""Value of `days` cannot be negative"" +}","def test_license_check_when_day_is_negative(run_api): + """""" + license check when day is negative + """""" + r = run_api.license_check(days=-1) + rjson = r.json() + test_assert.status(r, 400) + assert rjson['error'] == ""Value of `days` cannot be negative"", ""The error %s"" % rjson +" +license/rest/licenses_check,checking the license when day params is zero,,"{ +""statuas"": 200, +""response"" : licence status +}","def test_license_check_when_day_is_zero(run_api): + """""" + license check when day is 0 + """""" + r = run_api.license_check() + rjson = r.json() + test_assert.status(r, 200) + assert rjson['warn'] is False, ""The error %s"" % rjson + assert rjson['msg'] == ""All good!"", ""The error %s"" % rjson +" +license/rest/licenses_check,checking license when day params is equal to duration of license,,"{ +""statuas"": 200, +""response"" : licence status +}","def test_license_check_when_day_is_equal_to_duration(admin_exec_api, run_api): + """""" + license check day is equal to duration + """""" + res = admin_exec_api.license_list() + license_list = res.json() + active_license_list = [licenses for licenses in license_list['results'] if licenses['state'] == 'active'] + durations = [json.loads(lic['data'])[""duration""] for lic in active_license_list] + duration = max(durations) + total_duration = duration + math.ceil(5 * duration / 100) + r = run_api.license_check(days=total_duration) + rjson = r.json() + test_assert.status(r, 200) + assert rjson['msg'] == ""Some License(s) are expiring soon"", ""The error %s"" % rjson +" +/deploy/rest/change_ownership/,changing ownership of deployed machine by admin user from non-admin to manager user ,"{ + ""deployment_uuids"": [uuid], + ""owner"", + ""dest_user"": ""manager"" }","{ -""status"" : 200, -""response"" :success -}","def 
test_server_test_connection(run_api): +""status"" :200 +}","def test_deploy_change_ownership(skip_if_non_admin, non_admin_exec_api, run_api): """""" - Testing the Connection to the Server + To change ownership of deployed machine from non-admin user to manager by admin """""" + params, r = non_admin_exec_api.library_add_new_vm() + lib_id = r.json()[""uuid""] + r = non_admin_exec_api.deploy_image(lib_id=lib_id, deploy_on=list(run_api.clm_my_servers.keys())) + uuid = r.json()['uuid'] params = { - ""ip"": run_api.node_ip, - ""port"": 22, - ""username"": DEFAULT_ROOT_ACCOUNT[""user""], - ""password"": DEFAULT_ROOT_ACCOUNT[""password""] + ""deployment_uuids"": [uuid], + ""owner"": non_admin_exec_api.user, + ""dest_user"": ""manager"" } - r = run_api.server_test_connection(params=params) + res = run_api.deploy_change_ownership(params=params) + test_assert.status(res, 200) + new_owner = run_api.deploy_details(deploy_id=uuid).json()['owner'] + assert new_owner == ""manager"" + run_api.deploy_image_delete(deploy_id=uuid) + run_api.library_delete(uuid=lib_id) +" +/deploy/rest/change_ownership/,changing ownership of deployed machine when the owner and destination owner are same,"{ + ""deployment_uuids"": [uuid], + ""owner"": prev_owner, + ""dest_user"": prev_owner + }","{ + ""status"" : 400, + ""message"" : 'The dest_user and the owner should be different' +}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_change_ownership_for_same_users(skip_if_not_admin, run_api, custom_lib_non_admin_operations): + """""" + To change ownership of deployed machine if The dest_user and the owner should be same + """""" + deploy_id = custom_lib_non_admin_operations + uuid = deploy_id + prev_owner = run_api.deploy_details(deploy_id=uuid).json()['owner'] + params = { + ""deployment_uuids"": [uuid], + ""owner"": prev_owner, + ""dest_user"": prev_owner + } + res = run_api.deploy_change_ownership(params=params) + test_assert.status(res, 
400) + assert res.json()['error'] == 'The dest_user and the owner should be different'" +/deploy/rest/change_ownership/,changing ownership of deployed machine by manager user from non-admin to manager user ,"{ + ""deployment_uuids"": [uuid], + ""owner"": owner, + ""dest_user"": dest_user + }","{ + ""status"" : 400, + ""message"" : "" 'manager' as a Manager user, does not have right over "" +}","@pytest.mark.parametrize(""manager_exec_api"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_change_ownership_without_rights(skip_if_not_non_admin, run_api, manager_exec_api, library_add_new_vm): + """""" + To change ownership of deployed machine from non-admin user to admin by manager + """""" + params, r = library_add_new_vm + lib_id = r[""uuid""] + r = run_api.deploy_image(lib_id=lib_id) + uuid = r.json()['uuid'] + owner = run_api.user + dest_user = ""colama"" + params = { + ""deployment_uuids"": [uuid], + ""owner"": owner, + ""dest_user"": dest_user + } + res = manager_exec_api.deploy_change_ownership(params=params) + test_assert.status(res, 400) + assert res.json()['error'] == f""'manager' as a Manager user, does not have right over '{owner}' or '{dest_user}'"" + manager_exec_api.deploy_image_delete(deploy_id=uuid) + manager_exec_api.library_delete(uuid=lib_id) +" +/deploy/rest/change_ownership/,changing ownership of deployed machine when one of the used does not exist,"{ + ""deployment_uuids"": [uuid], + ""owner"": prev_owner, + ""dest_user"": invalid_user + }","{ + ""status"" : 400, + ""message"" : ""Either User '{prev_owner}' or '{invalid_user}' does not exist..."" +}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_change_ownership_for_not_existing_user(skip_if_not_admin, run_api, custom_lib_non_admin_operations): + """""" + To change ownership of deployed machine if one of the user do not exist + """""" + deploy_id = custom_lib_non_admin_operations + uuid = deploy_id + invalid_user = rand_string() + 
prev_owner = run_api.deploy_details(deploy_id=uuid).json()['owner'] + params = { + ""deployment_uuids"": [uuid], + ""owner"": prev_owner, + ""dest_user"": invalid_user + } + res = run_api.deploy_change_ownership(params=params) + test_assert.status(res, 400) + assert res.json()['error'] == f""Either User '{prev_owner}' or '{invalid_user}' does not exist..."" + + +" +/deploy/rest/change_ownership/,changing ownership of deployed machine by admin from admin user to non-admin,"{ + ""deployment_uuids"": [deploy_id], + ""owner"" + ""dest_user"" + }","{ + ""status"" : 400, + ""message"" : ""The provided UUIDs might belong to the DeployedIsland. Trigger the correct API"" +}","def test_deploy_change_ownership_with_island_uuid(skip_if_not_admin, ideploy_deploy, non_admin_exec_api, run_api): + """""" + To change ownership of deployed island from admin user to non-admin user by admin + """""" + template, r = ideploy_deploy + deploy_id = r.json()[""deploy_uuid""] + params = { + ""deployment_uuids"": [deploy_id], + ""owner"": run_api.user, + ""dest_user"": non_admin_exec_api.user + } + res = run_api.deploy_change_ownership(params=params) + rjson = res.json() + test_assert.status(res, 400) + assert f""The provided UUIDs ['{deploy_id}'] might belong to the DeployedIsland. Trigger the correct API"" in rjson[""error""], rjson +" +/deploy/rest/configure_autostart/,"setting a virtual machine to auto-start when the host machine reboots. 
Check the user type before performing the operation, only admin user type have the permission to perform such operations.",,"{ + ""status"" : 200 +}","def test_deploy_configure_autostart_vm_self(run_api, configure_autostart): + """""" + Set to Auto-start a VM when the host machine reboots + """""" + x, r = configure_autostart + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + else: + test_assert.status(r, 200) +" +/deploy/rest/configure_autostart/,setting a virtual machine to auto-start when the host machine reboots by an admin user ,,"{ + ""status"" : 200 +}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_configure_autostart_vm_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): + """""" + Set to Auto-start a VM when the host machine reboots by Admin + """""" + # Admin check for Auto-start a VM created by different user + deploy_id = custom_lib_non_admin_operations + r = run_api.configure_autostart(deploy_id) test_assert.status(r, 200) - result = r.json() - assert result[""ssh""][""success""] == 1, result " -/server/rest/test_connection/,testing th using invalid credentials,"{ - ""ip"" - ""port"": 22, - ""username"": ""invalid"", - ""password"":""invalid"", - }","{ -""status"" : 200, -""message"" : ""Authentication failed"" -}","def test_server_test_connection_invalid_credentials(run_api): +/deploy/rest/configure_autostart/,setting a virtual machine to auto-start when the host machine reboots by a non-admin user ,,"{ + ""status"" : 403 +}","@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_configure_autostart_vm_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): + """""" + Set to Auto-start a VM when the host machine reboots by non-admin + """""" + # Non-admin check for Auto-start a VM created by different user + deploy_id = custom_lib_admin_operations + r = 
run_api.configure_autostart(deploy_id) + test_assert.status(r, 403) +" +/deploy/rest/configure_autostart/,setting a virtual machine to auto-start when the host machine reboots by a manager when manager has rights over the server,,,"@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_configure_autostart_vm_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Set to Auto-start a VM when the host machine reboots + """""" + # When the user is not part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_admin_operations + r = run_api.configure_autostart(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_non_admin_operations + r = run_api.configure_autostart(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) + +" +/deploy/rest/configure_autostart/,setting a virtual machine to auto-start when the host machine reboots by a manager when manager does not have rights over the server,,,"@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_configure_autostart_vm_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Set to Auto-start a VM when the host machine reboots + """""" + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = 
custom_lib_admin_operations + r = run_api.configure_autostart(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + deploy_id = custom_lib_non_admin_operations + r = run_api.configure_autostart(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) + + +" +/deploy/rest/configure_autostart/,setting a virtual machine to auto-start when the host machine reboots by an admin user using invalid uuid,,"{ + ""status"" : 400, + ""message"" : 'Autostart is not allowed on machines which are part of island' +}","def test_deploy_configure_autostart_vm_in_island(skip_if_not_admin, run_api, ideploy_deploy): + """""" + Snapshot of the machine that is part of the island + """""" + params, r = ideploy_deploy + deploy_id = r.json()[""deploy_uuid""] + machine_id = run_api.ideploy_details(uuid=deploy_id).json()[""machines""][0][""uuid""] + res = run_api.configure_autostart(uuid=machine_id) + test_assert.status(res, 400) + assert res.json()[""error""] == 'Autostart is not allowed on machines which are part of island' +" +/deploy/rest/crash/,crashing a deployed machine successfully,,"{ + ""status"" : 201 +}","def test_deploy_crash(deploy_crash): + """""" + Crashing a Deployed Image + """""" + x, r = deploy_crash + test_assert.status(r, 201) +" +/deploy/rest/crash/,crashing a deployed machine successfully by an admin,,"{ + ""status"" : 201 +}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_crash_admin(skip_if_not_admin, run_api, custom_lib_non_admin_operations): """""" - Testing the Connection to the Server with invalid credentials + Crashing a Deployed Image by Admin """""" - params = { - ""ip"": run_api.node_ip, - ""port"": 22, - ""username"": ""invalid"", - ""password"": ""invalid"" - } - r = 
run_api.server_test_connection(params=params) - test_assert.status(r, 200) - result = r.json() - assert result[""ssh""][""success""] is False, result - assert result[""ssh""][""error""] == ""Authentication failed."", result + # Admin check of Crashing a Deployed Image created by different user + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_crash(deploy_id) + test_assert.status(r, 201) " -/server/rest/test_connection/,testing the connection to the server with incorrect port number,"{ - ""ip"" - ""port"": 424, - ""username"", - ""password"", - }","{ -""status"" : 200, -""message"" : ""Unable to connect to port""} -","def test_server_test_connection_incorrect_port(run_api): +/deploy/rest/crash/,crashing a deployed machine successfully by a non-admin,,"{ + ""status"" : 403 +}","@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_crash_non_admin(skip_if_not_non_admin, run_api, custom_lib_admin_operations): """""" - Testing the Connection to the Server with incorrect port + Crashing a Deployed Image by non-admin """""" - params = { - ""ip"": run_api.node_ip, - ""port"": 424, - ""username"": DEFAULT_ROOT_ACCOUNT[""user""], - ""password"": DEFAULT_ROOT_ACCOUNT[""password""] - } - r = run_api.server_test_connection(params=params) - test_assert.status(r, 200) - result = r.json() - assert result[""ssh""][""success""] is False, result - assert ""Unable to connect to port"" in result[""ssh""][""error""], result + # Non-admin check of Crashing a Deployed Image created by different user + deploy_id = custom_lib_admin_operations + r = run_api.deploy_crash(deploy_id) + test_assert.status(r, 403) " -/server/rest/test_connection/,testing the connection to the server with invalid port,"{ - ""ip"" - ""port"": 70000, - ""username"", - ""password"", - }","{ -""status"" : 400, -""message"" : ""Ensure this value is less than or equal to 65535"" -} -","def test_server_test_connection_invalid_port(run_api): 
+/deploy/rest/crash/,"crashing a deployed machine successfully by a manager, when manager has rights on server.",,,"@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_crash_manager_server_right(skip_if_not_manager, run_api, custom_lib_admin_operations, custom_lib_non_admin_operations): """""" - Testing the Connection to the Server with invalid port + Crashing a Deployed Image by manager when have right on server """""" - params = { - ""ip"": run_api.node_ip, - ""port"": 70000, - ""username"": DEFAULT_ROOT_ACCOUNT[""user""], - ""password"": DEFAULT_ROOT_ACCOUNT[""password""] - } - r = run_api.server_test_connection(params=params) - test_assert.status(r, 400) - res = r.json() - assert 'FAILURE' in res[""result""], res - assert ""Ensure this value is less than or equal to 65535"" in res[""error""], res + # When the user is not part of the group that the manager manages + deploy_id = custom_lib_admin_operations + r = run_api.deploy_crash(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_crash(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) " -/servers/rest/add/,"adding new server. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
-",,"{ -""status"" : 201 -}","def test_server_add(run_api, server_add_new): +/deploy/rest/crash/,"crashing a deployed machine successfully by a manager, when manager does not have rights on server.",,,"@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_crash_manager_no_server_right(skip_if_not_manager, run_api, custom_lib_admin_operations, custom_lib_non_admin_operations): """""" - Add Server + Crashing a Deployed Image by manager when have no right on server """""" - template, result = server_add_new - if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: - test_assert.status(result, 403) - elif run_api.user_type == USER_TYPE[""admin""]: - test_assert.status(result, 201) + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = custom_lib_admin_operations + r = run_api.deploy_crash(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_crash(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) + + " -/servers/rest/bulkops/,performing bulk operations on multiple existing servers where invalid operation is requested,"{ - ""server_list"": ['valid list'], - ""op"": 'invalid' +/deploy/rest/crash/,crashing a deployed machine using uuid for which machine does not exists,"{ +deploy_id = ""invalid"" }","{ -""status"" : 400, -""message"" : ""Unsupported operation. 
Available options are: [ 'syncrepo', 'delete', 'upgrade', 'lock_server', 'unlock_server', 'mark_for_maintenance', 'unmark_for_maintenance' ]"" -}","def test_server_bulkops_invalid_operation(skip_if_not_admin, run_api): + ""status"" : 404, + ""message"" : ""Machine matching query does not exist"" +}","def test_deploy_crash_invalid_uuid(run_api): """""" - invalid bulkops operation + crashing deployed machine using invalid uuid """""" - bulkops = { - ""server_list"": 'invalid', - ""op"": 'invalid' - } - r = run_api.server_bulkops(bulkops) - test_assert.status(r, 400) + deploy_id = ""invalid"" + r = run_api.deploy_crash(deploy_id, wait=False) + test_assert.status(r, 404) rjson = r.json() - assert rjson['error'] == ""Unsupported operation. Available options are: ['syncrepo', 'delete', 'upgrade', 'lock_server', 'unlock_server', 'mark_for_maintenance', 'unmark_for_maintenance']"", ""|> Json %s"" % rjson" -/servers/rest/bulkops/,"performing bulk operations on multiple existing servers where valid operation is requested. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
-","{ - ""server_list"": ['valid list'], - ""op"": 'valid' + assert ""Machine matching query does not exist"" in rjson[""error""], rjson +" +/deploy/rest/crash/,crashing a deployed machine without authorization,"{ +deploy_id = ""invalid"" }","{ -""status"" : 202, -""response"" : success -}","@pytest.mark.parametrize(""operation"", SERVER_BULK_OPS, indirect=True) -def test_server_bulkops(run_api, server_bulkops, operation): + ""status"" : 401, + ""message"" : ""Authentication credentials were not provided."" +}","def test_deploy_crash_without_authorization(anonymous_exec_api): """""" - Bulk Operations in Server + crashing deployed machine without authorization """""" - r = server_bulkops - if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: - test_assert.status(r, 403) - elif run_api.user_type == USER_TYPE[""admin""]: - test_assert.status(r, 202) + deploy_id = ""invalid"" + depl_crash = anonymous_exec_api.deploy_crash(deploy_id, wait=False) + depl_json = depl_crash.json() + test_assert.status(depl_crash, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided."" " -/servers/rest/bulkops/,performing bulk operations on non-existing servers where valid operation is requested,"{ - ""server_list"": ['invalid list'], - ""op"": 'valid' +/deploy/rest/crash/,crashing a deployed machine using invalid token,"{ +deploy_id = ""invalid"" }","{ -""status"" : 400, -""message"" : ""Server does not exists"" -}","def test_sever_bulkops_invalid_server_id(skip_if_not_admin, run_api): + ""status"" : 401, + ""message"" : ""Invalid token."" +}","def test_deploy_crash_invalid_token(invalid_exec_api): """""" - invalid server id + crashing deployed machine using invalid token """""" - SERVER_BULK_OPS = ['syncrepo', 'delete'] - for ops in SERVER_BULK_OPS: - bulkops = { - ""server_list"": 'invalid', - ""op"": ops - } - r = run_api.server_bulkops(bulkops) - test_assert.status(r, 400) - rjson = r.json() - assert rjson['failure'][0]['error'] == 
'Server does not exist', ""|> Json %s"" % rjson + deploy_id = ""invalid"" + depl_crash = invalid_exec_api.deploy_crash(deploy_id, wait=False) + depl_json = depl_crash.json() + test_assert.status(depl_crash, 401) + assert depl_json[""detail""] == ""Invalid token."" " -/servers/rest/bulkops/,"performing api bulk operations on server using empty list of server _list.Check the user type before performing the operation, only admin user type have the permission to perform such operations. -","{ - ""server_list"": [], - ""op"": 'mark_for_maintenance' -}","{ - ""status"" : 400, - ""message"" : ""server_list cannot be null or empty"" -}","def test_server_bulkops_empty_server_list(run_api): +/deploy/rest/delete/,deleteing virtual machine of a deployed machine,,"{ +""sttaus"" : 201 +}","def test_deploy_delete(deploy_delete): """""" - testing server bulkops api using params as empty server list + Deleting the VM """""" - bulkops = { - ""server_list"": [], - ""op"": 'mark_for_maintenance' - } - r = run_api.server_bulkops(bulkops) - rjson = r.json() - if run_api.user_type == 'non-admin': - test_assert.status(r, 403) - assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson - else: - test_assert.status(r, 400) - assert rjson[""result""] == ""FAILURE"", rjson - assert ""server_list cannot be null or empty"" in rjson[""error""], rjson + x, r = deploy_delete + test_assert.status(r, 201) " -/servers/rest/delete/{{UUID}}/,"requesting to delete server by searching with invalid server_id.Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
-","{ -server_id = ""invalid"" -}","{ -""status"" : 404, -}","def test_server_delete_invalid_id(run_api): +/deploy/rest/delete/,deleteing virtual machine of a deployed machine by an admin user,,"{ +""sttaus"" : 201 +}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_delete_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): """""" - invalid server id + Deleting the VM by Admin """""" - server_id = 'invalid' - r = run_api.server_delete(server_id) - if run_api.user_type == 'admin': - test_assert.status(r, 404) - rjson = r.json() - assert rjson['error'] == 'Delete: Server not found', ""|> Json %s"" % rjson - else: - test_assert.status(r, 403) - rjson = r.json() - rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson + # Admin check for Deleting the Deployed VM created by different user. + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_image_delete(deploy_id, {}) + test_assert.status(r, 201) +" +/deploy/rest/delete/,deleteing virtual machine of a deployed machine by a non-admin user,,"{ +""sttaus"" : 403 +}","@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_delete_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): + """""" + Deleting the VM by non-Admin + """""" + # Non-admin check for Deleting the Deployed VM created by different user. 
+ deploy_id = custom_lib_admin_operations + r = run_api.deploy_image_delete(deploy_id, {}) + test_assert.status(r, 403) +" +/deploy/rest/delete/,"deleteing virtual machine of a deployed machine by a manager, when manager has right over the server",,,"@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_delete_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): + """""" + Deleting the VM by Manager + """""" + # When the user is not part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_admin_operations + r = run_api.deploy_image_delete(deploy_id, {}) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_image_delete(deploy_id, {}) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) + " -/servers/rest/delete/{{UUID}}/,requesting to delete server by searching with valid data for an existing deployment,"{ - 'search': server_name -}","{ -""status"" : 400, -""message"" : ""Cannot delete a server while deployment exists"" -}","def test_server_delete_while_deployments_exits(skip_if_not_admin, deploy_image, run_api): +/deploy/rest/delete/,"deleteing virtual machine of a deployed machine by a manager, when manager does not have right over the server",,,"@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_delete_manager_no_server_right(skip_if_not_manager, 
custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): """""" - delete a server while deployments exist + Deleting the VM by Manager """""" - p, r = deploy_image - server_name = r.json()['server'] - params = { - 'search': server_name - } - res = run_api.server_list(params).json() - server_id = res['results'][0]['uuid'] - r = run_api.server_delete(server_id) - test_assert.status(r, 400) - rjson = r.json() - assert rjson['error'] == 'Cannot delete a server while deployments exist', ""|> Json %s"" % rjson + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = custom_lib_admin_operations + r = run_api.deploy_image_delete(deploy_id, {}) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_image_delete(deploy_id, {}) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) + + " -/servers/rest/delete/{{UUID}}/,"deleting server using invalid uuid. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
-","{ -""server_id"" :'invalid' +/deploy/rest/delete/,deleting image of deployed machine without authorization,"{ +deply_id : ""invalid"" }","{ - ""status"" : 404, - ""message"" : ""Delete : server not found"" -}","def test_server_delete_invalid_id(run_api): + ""status"" : 401, + ""message"" : ""Authentication credentials were not provided."" +}","def test_deploy_delete_without_authorization(anonymous_exec_api): """""" - invalid server id + deleting image of deployed machine without authorization """""" - server_id = 'invalid' - r = run_api.server_delete(server_id) - if run_api.user_type == 'admin': - test_assert.status(r, 404) - rjson = r.json() - assert rjson['error'] == 'Delete: Server not found', ""|> Json %s"" % rjson - else: - test_assert.status(r, 403) - rjson = r.json() - rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson + deploy_id = ""invalid"" + depl_delete = anonymous_exec_api.deploy_image_delete(deploy_id, {}, wait=False) + depl_json = depl_delete.json() + test_assert.status(depl_delete, 401) + assert depl_json[""detail""] == ""Authentication credentials were not provided."" + + " -/servers/rest/delete/{{UUID}}/,"deleting a server when its status is set to ""online"". Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
-","{ -""status"": ""Online"", -""total_machine"": 0 -} -","{ - ""status"" : 400, - ""message"" : ""Cannot delete a Server which is in Online state"" -}","def test_server_delete_status_online(run_api): +/deploy/rest/delete/,deleting image of deployed machine using invalid token,"{ +deply_id : ""invalid"" +}","{ + ""status"" : 401, + ""message"" : ""Authentication credentials were not provided."" +}","def test_deploy_delete_invalid_token(invalid_exec_api): """""" - delete a server when it's status is Online + deleting image of deployed machine using invalid token """""" - params = {""status"": ""Online"", ""total_machine"": 0} - _, server_list = run_api.filter_servers_matching_with_criteria(params, list(run_api.clm_my_servers.values())) - if server_list: - r = run_api.server_delete(server_list[0]) - rjson = r.json() - if run_api.user_type == 'admin': - test_assert.status(r, 400) - assert ""Cannot delete a Server which is in Online state"" in rjson[""error""], rjson - else: - test_assert.status(r, 403) - assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson + deploy_id = ""invalid"" + depl_delete = invalid_exec_api.deploy_image_delete(deploy_id, {}, wait=False) + depl_json = depl_delete.json() + test_assert.status(depl_delete, 401) + assert depl_json[""detail""] == ""Invalid token."" + + " -/servers/rest/detail/{{UUID}}/,getting details of server of existing id,,"{ -""status"" : 200 -}","def test_server_details(server_details): +/deploy/rest/delete/,deleting image of deployed machine which is part of the island,,"{ + ""status"" : 400, + ""message"" : ""Cannot perform delete operation on machine which is part of an Island"" +}","def test_deploy_delete_machine_part_of_island(run_api, ideploy_deploy): """""" - Getting details of Server + Deletion of the machine that is part of the island """""" - template, r = server_details - result = r.json() - test_assert.status(template, result, ""server_details"") - 
test_assert.status(r, 200) + params, r = ideploy_deploy + deploy_id = r.json()[""deploy_uuid""] + machine_id = run_api.ideploy_details(uuid=deploy_id).json()[""machines""][0][""uuid""] + res = run_api.deploy_image_delete(deploy_id=machine_id) + test_assert.status(res, 400) + assert res.json()[""error""] == ""Cannot perform delete operation on machine which is part of an Island"" + " -/servers/rest/detail/{{UUID}}/,getting details of server of non-existing id,"{ -server_id = ""invalid"" -}","{ -""status"" : 404, -""message"" : ""Not found"" -}","def test_server_details_invalid_uuid(run_api): +/deploy/rest/delete/,deleting image of deployed machine which is protected,,"{ + ""status"" : 400, + ""message"" : ""Cannot perform delete operation on machine because it has been protected"" +}","def test_deploy_delete_protected_machine(run_api, deploy_protect): """""" - fetch server details with invalid uuid + Deletion of the machine that is protected """""" - server_id = 'invalid' - r = run_api.server_details(server_id) - test_assert.status(r, 404) - rjson = r.json() - assert rjson['detail'] == 'Not found.', ""|> json %s"" % rjson + r = deploy_protect + deploy_id = r.json()[""uuid""] + res = run_api.deploy_image_delete(deploy_id) + test_assert.status(res, 400) + assert res.json()[""error""] == ""Cannot perform delete operation on machine because it has been protected"" " -/servers/rest/list/,getting the list of servers,,"{ -""status"" : 200, -""response"" : server list -}","def test_server_list(server_list): +/deploy/rest/deploylist,getting list of image of deployed machine,,"{ +""status"":200 +}","def test_deploy_deploylist(deploy_deploylist): """""" - Getting the list Server + Getting deploy details of VM """""" - r = server_list - test_assert.status(r, SERVER_LIST, ""server_list"", ""hostname"") + r = deploy_deploylist test_assert.status(r, 200) " -/servers/rest/list/,getting the list of servers excluding some servers using group_id,"{ -exclude_group_id = 
[server_list_to_exclude] -}","{ -""response"" : server list -}","def test_server_list_by_excluding_group(skip_if_not_admin, run_api, group_add): +/deploy/rest/deploylist,getting filtered list of image of deployed machine,,"{ + ""response"" : success +}","@pytest.mark.parametrize(""lib_filter_kwargs"", [{""vm_names"": [f""{prefix_name}{rand_string()}"" for _ in range(library_count)]}], indirect=True) +def test_deploy_deploylist_filter(run_api: apiops, lib_filter_kwargs): """""" - fetch server list by excluding group + Fetching the list of deployed images by adding filters """""" - p, r = group_add - rjson = r.json() - params = { - 'exclude_group_id': rjson['id'] - } - # New added group does not have any server so it will return all the server list - servers1 = run_api.server_list(params).json() - # Fetching the server list - servers2 = run_api.server_list().json() - assert servers1['count'] == servers2['count'] + depl_res = [] + templates, res = lib_filter_kwargs + for r in res: + rjson = r.json() + depl_r = run_api.deploy_image(rjson[""uuid""]) + depl_res.append(depl_r) + try: + filter_on_input_result(run_api, library_count, templates, depl_res, prefix_name, run_api.deploy_deploylist) + finally: + depl_uuids = [depl.json()[""uuid""] for depl in depl_res] + run_api.deploy_bulkops({""machine_list"": depl_uuids, ""op"": ""delete""}) " -/servers/rest/list/,getting the list of servers excluding some servers using group_name,"{ -exclude_group_name = [server_list_to_exclude] -}","{ -""response"" : server list -}","def test_server_list_by_excluding_group_name(run_api): +/deploy/rest/deploylist,getting list of image of deployed machine using tag_list,,"{ + ""response"" : success +}","def test_deploylist_machine_tag_detail(run_api, deploy_image): """""" - fetch server list by excluding_group name + Get machine based on tag filter """""" - group_name = rand_string(10) - params = { - 'exclude_group_name': group_name - } - res = run_api.server_list(params).json() - assert 
res['count'] == 0, ""|> Json %s"" % res + params, r = deploy_image + machine_id = r.json()[""uuid""] + tag = rand_string() + params = {""machine_list"": [machine_id], ""tags_list"": [[tag]]} + run_api.deploy_add_tags(params=params) + res = run_api.deploy_deploylist(params={""tags"": tag}) + rjson = res.json() + # assert rjson[""count""] == 1, ""The error is %s"" % res.json() + for machine in rjson['results']: + machine_details = run_api.deploy_details(machine['uuid']).json() + all_tags = [tags['value'] for tags in machine_details['tags']] + assert tag in all_tags, ""|> Json %s"" % all_tags " -/servers/rest/list/,getting the list of servers excluding some servers using search parameter,"{ - 'search': hostname -}","{ -""response"" : server list -}","def test_server_list_by_search(run_api): +/deploy/rest/deploylist,getting list of image of deployed machine using server parameter,,"{ + ""response"" : success +}","def test_deploy_list_with_deploylist_machine_server_filter(run_api, deploy_image): """""" - fetch server list based on search params + fetch list with server """""" - server_list = run_api.server_list().json() - hostname = choice([server['hostname'] for server in server_list['results']]) - params = { - 'search': hostname - } - result = run_api.server_list(params).json() - for server in result['results']: - assert server['hostname'] == hostname, ""|> json %s"" % server - -" -/servers/rest/list/,getting the list of servers excluding some servers using server UUID,"{ - 'uuid': server_uuid -}","{ -""response"" : server list -}","def test_server_list_by_uuid(run_api): + p, r = deploy_image + server = r.json()['server'] + params = {""server"": server} + rjson = run_api.deploy_deploylist(params).json() + for machines in rjson['results']: + assert machines['server'] == server, ""Json |> %s"" % machines" +/deploy/rest/deploylist,getting list of image of deployed machine using session_id parameter,,"{ + ""response"" : success +}","def 
test_deploy_list_with_session_id_filter(deploy_image, run_api): """""" - fetch server list based on server uuid + Fetch list with session_id """""" - server_list = run_api.server_list().json() - server_uuid = choice([server['uuid'] for server in server_list['results']]) - params = { - 'uuid': server_uuid - } - result = run_api.server_list(params).json() - for server in result['results']: - assert server['uuid'] == server_uuid, ""|> json %s"" % server + params, r = deploy_image + rjson = r.json() + machine_id = rjson['uuid'] + session_id = rjson[""machine""][""tags""][0][""value""] + params = {""_sessionid"": session_id, ""uuid"": machine_id} + assert run_api.deploy_deploylist(params).json()[""count""] == 1 " -/servers/rest/list/,getting the list of servers using group id,"{ - 'group_id': group_id -}","{ -""response"" : server list -}","def test_server_list_by_group_id(skip_if_not_admin, group_add, run_api): +/deploy/rest/deploylist,getting list of image of deployed machine using auto-start parameter,,"{ + ""response"" : success +}","def test_deploy_list_with_autostart_filter(deploy_image, run_api): """""" - fetch server list by group id + Fetch list with autostart filter """""" - params, r = group_add + params, r = deploy_image rjson = r.json() - group_id = rjson['id'] - group_name = rjson['name'] - servers_list = { - ""servers_list"": list(run_api.clm_my_servers.values()) - } - run_api.group_add_server(servers_list, group_id) - params = { - 'group_id': group_id - } - servers = run_api.server_list(params).json() - for server in servers['results']: - server_details = run_api.server_details(server['uuid']).json() - server_in_groups = [group['name'] for group in server_details['groups']] - assert group_name in server_in_groups, ""|> Json %s"" % server_details + autostart = rjson[""AutoStart""] + params = {""autostart"": autostart} + rjson = run_api.deploy_deploylist(params).json() + for machines in rjson['results']: + assert machines['AutoStart'] == autostart, ""Json 
|> %s"" % machines " -/servers/rest/list/,getting the list of servers using group name,"{ - 'group_name': group_name -}","{ -""response"" : server list -}","def test_server_list_by_group_name(run_api): +/deploy/rest/deploylist,getting list of image of deployed machine using state parameter,,"{ + ""response"" : success +}","def test_deploy_list_with_machine_state_filter(run_api, deploy_image): """""" - fetch server list by group name + fetch list with deploy machine state filter """""" - group_name = rand_string(10) - params = { - 'group_name': group_name - } - res = run_api.server_list(params).json() - assert res['count'] == 0, ""|> Json %s"" % res + p, r = deploy_image + state = r.json()['state'] + params = {""state"": state} + rjson = run_api.deploy_deploylist(params).json() + for machines in rjson['results']: + assert machines['state'] == state, ""Json |> %s"" % machines " -/servers/rest/list/,getting the list of servers using invalid group id,"{ - 'group_id': invalid_group_id +/deploy/rest/deploylist,getting list of image of deployed machine using invalid value of group_name parameter,"{ +group_name: ""invalid"" }","{ -""response"" : failure -}","def test_server_list_by_invalid_group_id(run_api): +""response"" :Select a valid choice, the one you provided is not one of the available choices.' +}","def test_deploy_list_filter_with_invalid_group(run_api): """""" - fetch server list by group + Testing invalid group name filtering """""" - group_id = 0 - params = { - 'group_id': group_id - } - res = run_api.server_list(params).json() - assert res['count'] == 0, ""|> Json %s"" % res - + group_name = ""invalid"" + params = {'group': group_name} + response = run_api.deploy_deploylist(params) + x = response.json() + test_assert.status(response, 400) + assert x[""group""] == [f'Select a valid choice. 
{group_name} is not one of the available choices.'] " -/servers/rest/list/,fetching the server list by setting the replication status,"{ -""status"" :""installing"" -} -",,"def test_server_list_by_installation_status(run_api): +/deploy/rest/deploylist,getting count of all images in a deployed machine ,,"{ + ""status"" : 200 +}","def check_count_deploylist(run_api, deploy_id, params={}): """""" - fetch server list by replication status + getting count of all images in a deployed machine """""" - params = { - 'status': ""Installing"" - } - res = run_api.server_list(params).json() - assert res['count'] == 0, ""|> Json %s"" % res + r = run_api.deploy_deploylist(params) + res = r.json() + test_assert.status(r, 200) + count = 0 + for dict in res['results']: + if dict[""uuid""] == deploy_id: + count += 1 + break + return count " -/servers/rest/list/,fetching the server list by setting the scope parameter,"{ -""scope"" : ""my"" -}","{ -""status"" :200 -}","def test_server_list_by_scope(run_api): +/deploy/rest/deploylist,"getting list of image of deployed machine by setting scope to ""all"" by a non-admin user. ","{ +scope : ""all"" +}",,"@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_deploylist_non_admin_scope_all(skip_if_not_non_admin, custom_lib_admin_operations, run_api): """""" - fetch server list using scope :- 'my' + DeployList with scope=all of the VM by non-Admin """""" - params = { - 'scope': ""my"" - } - res = run_api.server_list(params) - test_assert.status(res, 200)" -/servers/rest/syncrepo/,"syncing the layers on server. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
-",,"{ -""status"" :201, -""reponse"" : success -}","@pytest.mark.skip(reason=""having issue in this testcase"") -def test_server_syncrepo(run_api, server_syncrepo): + deploy_id = custom_lib_admin_operations + count = check_count_deploylist(run_api, deploy_id, params={'scope': 'all', 'uuid': deploy_id}) + assert count == 0" +/deploy/rest/deploylist,"getting list of image of deployed machine by setting scope to ""all"" by a manager who has rights over the server","{ +scope : ""all"" +}",,"@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_details_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): """""" - Sync the layers on server + Details of the VM by Manager """""" - r = server_syncrepo - if run_api.user_type == USER_TYPE[""non_admin""]: - test_assert.status(r, 403) - elif run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: - test_assert.status(r, 201) + # When the user is not part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_admin_operations + count = check_count_deploylist(run_api, deploy_id, params={'scope': 'all', 'uuid': deploy_id}) + assert count == 0 + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_non_admin_operations + count = check_count_deploylist(run_api, deploy_id, params={'scope': 'all', 'uuid': deploy_id}) + assert count == 1 + " -/servers/rest/syncrepo/{{UUID}},"syncing layers on server using existing UUID and server is running.Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
-",,"{ -""status"" : 403 / 201 -}","def test_server_syncrepo(run_api, server_syncrepo): +/deploy/rest/deploylist,"getting list of image of deployed machine by setting scope to ""all"" by a manager who does not have rights over the server","{ +scope : ""all"" +}",,"@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_details_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): """""" - Sync the layers on server + Details of the VM by Manager """""" - r = server_syncrepo - if run_api.user_type == USER_TYPE[""non_admin""]: - test_assert.status(r, 403) - elif run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: - test_assert.status(r, 201) -" -/servers/rest/upgradeserver/{UUID}/,"updating server using invalid data.Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
-","{ -server_id = ""invalid"" -}","{ -""status"" : 404, -""message"" : ""Server not found"" -}","def test_server_upgrade_invalid_uuid(run_api): + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = custom_lib_admin_operations + count = check_count_deploylist(run_api, deploy_id, params={'scope': 'all', 'uuid': deploy_id}) + assert count == 0 + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + deploy_id = custom_lib_non_admin_operations + count = check_count_deploylist(run_api, deploy_id, params={'scope': 'all', 'uuid': deploy_id}) + assert count == 0" +/ideploy/rest/edit/,editing an image of a deployed island,,"{ + ""response"" : success +}"," +@pytest.mark.parametrize(""cpucount"", CPU_COUNT, indirect=True) +@pytest.mark.parametrize(""ram"", RAM, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_edit(run_api, custom_lib_admin_operations, deploy_edit, cpucount, ram): """""" - server upgrade with invalid server id + Editing the VM """""" - server_id = 'invalid' - r = run_api.server_upgrade(server_id) - if run_api.user_type == 'admin': - test_assert.status(r, 400) - rjson = r.json() - assert rjson['error'] == 'Server Upgrade API isn\'t implemented. Use ServerBulkOps with ""upgrade"" as operation to upgrade Managed Hosts', ""|> Json %s"" % rjson - else: - test_assert.status(r, 403) - rjson = r.json() - assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson + params, r = deploy_edit + test_assert.status(r, params, ""deploy_edit"") " -/servers/rest/upgradeserver/{UUID}/,"updating server using valid existing data.Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
-",,"{ -""status"" : 201, -""response"" : server updated -}","def test_server_upgradeserver(run_api, server_upgradeserver): +/ideploy/rest/edit/,"editing an image of a deployed island by deploying it first and than starting, and than stopping by admin ",,"{ + ""status"" : 202 +}","@pytest.mark.parametrize(""cpucount"", [1], indirect=True) +@pytest.mark.parametrize(""ram"", [100], indirect=True) +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_edit_vm_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api, cpucount, ram): """""" - Updating sever + Deploying a Image and Starting the VM and then Stopping by Admin """""" - r = server_upgradeserver - if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: - test_assert.status(r, 403) - elif run_api.user_type == USER_TYPE[""admin""]: - test_assert.status(r, 201) + # Admin check of Starting a deployment created by different user + deploy_id = custom_lib_non_admin_operations + params, r = run_api.deploy_edit(deploy_id, cpus=cpucount, ram=ram) + test_assert.status(r, 202) " -/shares/rest/add/{{UUID}}/,adding new object to vm ,,"{ -""status"" : 201, -""response"" : success -}","def test_shares_add(shares_add): +/ideploy/rest/edit/,"editing an image of a deployed island by deploying it first and than starting, and than stopping by non-admin ",,"{ + ""status"" : 403 +}","@pytest.mark.parametrize(""cpucount"", [1], indirect=True) +@pytest.mark.parametrize(""ram"", [100], indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_edit_vm_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api, cpucount, ram): """""" - Adding new object to the vm + Deploying a Image and Starting the VM and then Stopping """""" - template, r = shares_add - test_assert.status(r, 201) + # Non-admin check of Starting a deployment created by different user + deploy_id = 
custom_lib_admin_operations + params, r = run_api.deploy_edit(deploy_id, cpus=cpucount, ram=ram) + test_assert.status(r, 403) " -/shares/rest/list/,fetching the shares list of machine,,"{ -""status"" : 201, -""response"" : success -}","def test_shares_list(shares_list): +/ideploy/rest/edit/,"editing an image of a deployed island by deploying it first and than starting, and than stopping by manager who has rights over the server",,,"@pytest.mark.parametrize(""cpucount"", [1], indirect=True) +@pytest.mark.parametrize(""ram"", [100], indirect=True) +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_edit_vm_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api, cpucount, ram): """""" - Fetch list of shares of machine + Deploying a Image and Starting the VM and then Stopping """""" - r = shares_list - test_assert.status(r, 200) + # When the user is not part of the group that the manager manages + deploy_id = custom_lib_admin_operations + params, r = run_api.deploy_edit(deploy_id, cpus=cpucount, ram=ram) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_non_admin_operations + params, r = run_api.deploy_edit(deploy_id, cpus=cpucount, ram=ram) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) + + " -/tags/rest/add/{UUID}/,adding tag for invalid UUID of machine,"{ -vm_uuid = ""invalid"" -}","{ -""status"" : 404, -""response"" : ""No object with given uuid"" -}","def test_tags_add_with_invalid_uuid(run_api): +/ideploy/rest/edit/,"editing an image of a deployed island by deploying it first and than starting, and than stopping by 
manager who does not have right over the server",,,"@pytest.mark.parametrize(""cpucount"", [1], indirect=True) +@pytest.mark.parametrize(""ram"", [100], indirect=True) +@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_edit_vm_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api, cpucount, ram): """""" - add tag with invalid uuid + Deploying a Image and Starting the VM and then Stopping """""" - vm_uuid = ""invalid"" + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = custom_lib_admin_operations + params, r = run_api.deploy_edit(deploy_id, cpus=cpucount, ram=ram) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + deploy_id = custom_lib_non_admin_operations + params, r = run_api.deploy_edit(deploy_id, cpus=cpucount, ram=ram) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) + - tag_params, result = run_api.tag_add(vm_uuid,) - test_assert.status(result, 404) - rjson = result.json() - assert rjson['error'] == ""No object with given uuid"", ""The error message is %s"" % (rjson['error']) " -/tags/rest/add/{UUID}/,adding tag for valid existing UUID of machine,,"{ -""status"" : 201, -""response"" : added tag -}","def test_tags_add(run_api, library_add_new_vm): +/ideploy/rest/edit/,editing an image of a deployed island which is running state,,,"def test_deploy_edit_running_vm(run_api, deploy_start): """""" - add tag with valid data + Edit a running VM """""" - params, r = library_add_new_vm - vm_uuid = r['uuid'] - - # Add Tag - tag_params, result = 
run_api.tag_add(vm_uuid) - test_assert.status(result, 201) - res = run_api.tag_list(tag_params, filter_search={""object_uuid"": vm_uuid}) - results = res.json()[""results""] - tag = results[-1] - r = run_api.tag_delete(id=tag[""id""], params={}) + params, x = deploy_start + machine_id = x.json()[""uuid""] + params, r = run_api.deploy_edit(deploy_id=machine_id) + test_assert.status(r, 400), ""The error is %s"" % r.json() + assert r.json()[""error""] == 'Machine can only be edited when it is in stopped state' " -/tags/rest/add/{UUID}/,adding tag for valid existing UUID with empty name attribute in the body,"{ -""tag_list"" = - [ - { - ""name"" : """" - ""value"": ""494"", - ""description"": ""test"" - } - ] +/ideploy/rest/edit/,editing an image of a deployed island for machine that does not exists,"{ +uuid : ""invalid"" }","{ -""status"" : 400, -""response"" : ""This field may not be blank"" -}","def test_tags_add_with_empty_name(run_api, library_add_new_vm): + ""status"" : 404, + ""message"" : 'Machine with uuid does not exist' +}","def test_deploy_edit_invalid_name(run_api): """""" - add tag with empty name + Edit a machine that do not exist """""" - params, r = library_add_new_vm - vm_uuid = r['uuid'] - - params = {""tag_list"": [{""name"": """", ""value"": ""494"", ""description"": ""test""}]} - tag_params, result = run_api.tag_add(vm_uuid, params) - test_assert.status(result, 400) - rjson = result.json() - msg = rjson['tag_list'][0]['name'][0] - assert msg == ""This field may not be blank."", ""The error message is %s"" % (msg) + uuid = ""invalid"" + params, r = run_api.deploy_edit(deploy_id=uuid) + test_assert.status(r, 404), ""The error is %s"" % r.json() + assert r.json()[""error""] == 'Machine with uuid [invalid] does not exist' " -/tags/rest/add/{UUID}/,adding tag for valid existing UUID without name attribute in body,"{ -""tag_list"" = - [ - { - ""value"": ""494"", - ""description"": ""test"" - } - ] +/ideploy/rest/edit/,"editing an image of a deployed 
island for machine , by providing machine name that contains slash.","{
+uuid : ""/invalid/name""
}","{
-""status"" : 400,
-""response"" : ""This field is required""
-}","def test_tags_add_without_name(run_api, library_add_new_vm):
+ ""status"" : 400,
+ ""message"" : ""Name cannot contain '/'""
+}","def test_deploy_edit_with_invalid_name(run_api, deploy_image):
 """"""
- add tag without name
+ To edit the machine and pass the name with '/' in it
 """"""
- params, r = library_add_new_vm
- vm_uuid = r['uuid']
-
- params = {""tag_list"": [{""value"": ""494"", ""description"": ""test""}]}
- tag_params, result = run_api.tag_add(vm_uuid, params)
- test_assert.status(result, 400)
- rjson = result.json()
- msg = rjson['tag_list'][0]['name'][0]
- assert msg == ""This field is required."", ""The error message is %s"" % (msg)
+ params, r = deploy_image
+ machine_id = r.json()[""uuid""]
+ invalid_name = ""/invalid/name/""
+ params, x = run_api.deploy_edit(deploy_id=machine_id, name=invalid_name)
+ test_assert.status(x, 400)
+ assert x.json()[""error""] == ""Name cannot contain '/'""
 "
-/tags/rest/add/{UUID}/,adding tag using invalid token,"{
-vm_uuid = ""invalid""
-}","{
-""status"" : 401,
-""message"" : ""Invalid token""
-}","def test_tags_add_invalid_token(invalid_exec_api):
+/ideploy/rest/edit/,"editing an image of a deployed island by providing a machine name longer than 100 characters",,"{
+ ""status"" : 400,
+ ""message"" : 'Name cannot be longer than 100 characters'
+}","def test_deploy_edit_with_very_long__name(run_api, deploy_image):
 """"""
- invalid token
+ To edit the machine and pass the name with more than 100 characters
 """"""
-
- vm_uuid = ""invalid""
- p, r = invalid_exec_api.tag_add(vm_uuid)
- test_assert.status(r, 401)
- rjson = r.json()
- assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail'])
+ invalid_name = rand_string(char_size=101)
+ params, r = deploy_image
+ machine_id = r.json()[""uuid""]
+ params, x = 
run_api.deploy_edit(deploy_id=machine_id, name=invalid_name) + test_assert.status(x, 400) + assert x.json()[""error""] == 'Name cannot be longer than 100 characters' " -/tags/rest/add/{UUID}/,adding tag without authorization,"{ -vm_uuid = ""valid"" -}","{ -""status"" : 401, -""message"" : ""Authentication credentials were not provided."" -}","def test_tags_add_without_authorization(anonymous_exec_api): +/deploy/rest/get_ip/,fetching the IP of a deployed machine which is currently in stopped state,,"{ + ""status"" : 400, + ""message"" : ""Machine is not in running state so cannot fetch ip"" +}","def test_deploy_get_ip_stopped_machine(run_api, deploy_image): """""" - without authorization + get_ip of a deployed machine which is stopped """""" - vm_uuid = ""invalid"" - p, r = anonymous_exec_api.tag_add(vm_uuid,) - test_assert.status(r, 401) - rjson = r.json() - assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) + template, r = deploy_image + deploy_id = r.json()[""uuid""] + r = run_api.deploy_get_ip(deploy_id) + res = r.json() + test_assert.status(r, 400) + assert res[""result""] == ""FAILURE"" + assert f""Machine with uuid `{deploy_id}` is not in running state so cannot fetch ip"" in res[""error""], res + + " -/tags/rest/delete/{id}/,requesting to delete tag using invalid tag_id,"{ -tag_id = id -}","{ -""status"" : 404, -""message"" : ""Tag does not exist"" -}","def test_tags_delete_with_invalid_id(run_api): +/deploy/rest/protect/,successfully protecting the machine deployment,,"{ + ""status"" : 200 +}","def test_deploy_protect(deploy_protect): """""" - tag delete invalid id + Protect a mc Deployment """""" - tag_id = 0 - r = run_api.tag_delete(id=tag_id, params={}) - test_assert.status(r, 404) - rjson = r.json() - assert rjson['error'] == ""Tag does not exist"", ""The error message is %s"" % (rjson['error']) + r = deploy_protect + test_assert.status(r, 200) " 
-/tags/rest/delete/{id}/,requesting to delete tag using invalid token,"{ -tag_id = id -}","{ -""status"" : 401, -""message"" : ""Invalid token"" -}"," -def test_tags_delete_invalid_token(invalid_exec_api): +/deploy/rest/protect/,protecting the machine deployment by an admin user,,"{ + ""status"" : 200 +}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_protect_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): """""" - invalid token + Protect a mc Deployment by Admin """""" - tag_id = 0 - r = invalid_exec_api.tag_delete(id=tag_id, params={}) - test_assert.status(r, 401) - rjson = r.json() - assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) + # Admin check for Protect a mc Deployment created by different user. + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_protect(deploy_id) + test_assert.status(r, 200) + r = run_api.deploy_unprotect(deploy_id) " -/tags/rest/delete/{id}/,"requesting to delete tag with valid existing id and the tag is in [session_id, session_created_on, session name, deployment ip, deployment mac]",,"{ -""status"" : 400, -""message"" : ""Delete not allowed"" -}","def test_tags_delete_with_undeletable_tag(library_add_new_vm, run_api): +/deploy/rest/protect/,protecting the machine deployment by non-admin user,,"{ + ""status"" : 403 +}","@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_protect_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): """""" - tag delete tags are '_sessionid', '_session_created_on' + Protect a mc Deployment by non-Admin """""" - params, r = library_add_new_vm - vm_uuid = r['uuid'] - tag_params, result = run_api.tag_add(vm_uuid) - res = run_api.tag_list(tag_params, filter_search={""object_uuid"": vm_uuid}) - results = res.json()[""results""] - newtag = [tag for tag in results if tag['name'] in ('_sessionid', 
'_session_created_on')][0] - r = run_api.tag_delete(id=newtag[""id""], params={}) - test_assert.status(r, 400) - rjson = r.json() - assert rjson['result'] == ""Delete not allowed"", ""The error message is %s"" % (rjson['result']) + # Non-admin check for Protect a mc Deployment by different user. + deploy_id = custom_lib_admin_operations + r = run_api.deploy_protect(deploy_id) + test_assert.status(r, 403) + r = run_api.deploy_unprotect(deploy_id) " -/tags/rest/delete/{id}/,"requesting to delete tag with valid existing id and the tag is not in [session_id, session_created_on, session name, deployment ip, deployment mac]",,"{ -""status"" : 204, -""response"" : tag deleted -}","def test_tags_delete(library_add_new_vm, run_api): +/deploy/rest/protect/,protecting the machine deployment by manager who has rights over the server,,,"@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_deploy_protect_manager_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): """""" - tag delete + Protect a mc Deployment """""" - params, r = library_add_new_vm - vm_uuid = r['uuid'] - tag_params, result = run_api.tag_add(vm_uuid) - res = run_api.tag_list(tag_params, filter_search={""object_uuid"": vm_uuid}) - results = res.json()[""results""] - newtag = [tag for tag in results if tag['name'] not in ('_sessionid', '_session_created_on')][0] - r = run_api.tag_delete(id=newtag[""id""], params={}) - test_assert.status(r, 204) + # When the user is not part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_admin_operations + r = run_api.deploy_protect(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + r = run_api.deploy_unprotect(deploy_id) + + # When the user is part of the 
group that the manager manages and deployment is on manager rights to server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_protect(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) + r = run_api.deploy_unprotect(deploy_id) " -/tags/rest/delete/{id}/,requesting to delete tag without authorization,"{ -tag_id = id -}","{ -""status"" : 401, -""message"" : ""Authentication credentials were not provided."" -}","def test_tags_delete_without_authorization(anonymous_exec_api): +/deploy/rest/protect/,protecting the machine deployment by manager who does not have rights over the server,,,"@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_protect_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): """""" - without authorization + Protect a mc Deployment """""" - tag_id = 0 - r = anonymous_exec_api.tag_delete(id=tag_id, params={}) - test_assert.status(r, 401) - rjson = r.json() - assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = custom_lib_admin_operations + r = run_api.deploy_protect(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + r = run_api.deploy_unprotect(deploy_id) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_protect(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) + r = 
run_api.deploy_unprotect(deploy_id) " -/tags/rest/list/,requesting to fetch list of tags with invalid token,"{ -'page' = None, -'page_size' = None -}","{ -""status"" : 401, -""message"" : ""Invalid token"" +/deploy/rest/protect/,protecting the machine deployment using deploy id for which deployment does not exist,,"{ + ""status"" : 404, + ""message"" : ""Deployed Machine Protect: Unable to find deployment"" }"," -def test_tags_list_invalid_token(invalid_exec_api): +def test_deploy_protect_invalid_uuid(run_api): """""" - invalid token + Protect a invalid mc Deployment """""" - r = invalid_exec_api.tag_list({}, {}) - test_assert.status(r, 401) - rjson = r.json() - assert rjson['detail'] == ""Invalid token."", ""|> The error message is {}"".format(rjson['detail']) + deploy_id = ""invalid"" + r = run_api.deploy_protect(deploy_id) + res = r.json() + test_assert.status(r, 404) + assert res[""result""] == 'FAILURE', res + assert ""DeployedMachineProtect: Unable to find deployment"" in res[""error""], res " -/tags/rest/list/,requesting to fetch list of tags with Page and Page Size,"{ -'page' = 1, -'page_size' = 1 -}","{ -""status"" : 200, -""message"" : tag list for specific page -}","def test_tags_list_with_page_and_pagesize(run_api): +/deploy/rest/protect/,protecting the machine deployment which is a part of island,,"{ + ""status"" : 400, + ""message"" :""Protecting deployment which is part of island is not allowed"" +}"," +def test_deploy_protect_island_machine_uuid(run_api, ideploy_details): """""" - when requested with page and page size + Protect a mc Deployment which is part of an island """""" - params = {'page': 1, 'page_size': 1} - r = run_api.tag_list(params, {}) - test_assert.status(r, 200)" -/tags/rest/list/,requesting to fetch list of tags without authorization,"{ -'page' = 1, -'page_size' = 1 -}","{ -""status"" : 401, -""message"" : ""Authentication credentials were not provided."" -}","def test_tags_list_without_authorization(anonymous_exec_api): + param, 
result = ideploy_details + x = result.json() + machine_deploy_id = x[""machines""][0][""uuid""] + r = run_api.deploy_protect(machine_deploy_id) + res = r.json() + test_assert.status(r, 400) + assert res[""result""] == 'FAILURE', res + assert ""Protecting deployment which is part of island is not allowed"" in res[""error""], res +" +/deploy/rest/unprotect/,unprotecting the machine deployment successfully,,"{ + ""status"" : 200 +}","def test_deploy_unprotect(deploy_unprotect): """""" - without authorization + Un-Protect a mc Deployment """""" - r = anonymous_exec_api.tag_list({}, {}) - test_assert.status(r, 401) - rjson = r.json() - assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The error message is {}"".format(rjson['detail']) + r = deploy_unprotect + test_assert.status(r, 200) " -/user/rest/add-group/{id}/,"requesting to add user to existing group using valid id.Check the user type before performing the operation, only admin user type have the permission to perform such operations. -","{ -user_id = ""valid"" , -group_names = ""valid_group_list"" -}","{ -""status"" : 403 / 201, -""response"" : success -}","def test_user_add_group(run_api, user_add_group): +/deploy/rest/unprotect/,unprotecting the machine deployment by an admin user,,"{ + ""status"" : 200 +}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_unprotect_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): """""" - Adding multiple users into group + Un-Protect a mc Deployment by Admin """""" - r = user_add_group - if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: - test_assert.status(r, 403) - elif run_api.user_type == USER_TYPE[""admin""]: - test_assert.status(r, 201) + # Admin check for Un-Protect a mc Deployment created by different user. 
+ deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_protect(deploy_id) + r = run_api.deploy_unprotect(deploy_id) + test_assert.status(r, 200) " -/user/rest/add-group/{id}/,requesting to add user to existing group using valid id but invalid token,"{ -user_id = ""valid"" , -group_names = ""valid_group_list"" -}","{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_user_add_group_with_invalid_token(invalid_exec_api): +/deploy/rest/unprotect/,unprotecting the machine deployment by a non-admin user,,"{ + ""status"" : 403 +}","@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_deploy_unprotect_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): """""" - Adding user into group with invalid token + Un-Protect a mc Deployment by non-Admin """""" - groups_name = { - 'groups': ['0'] - } - template, r = invalid_exec_api.user_add_group(user_id=0, groups=groups_name) - res = r.json() - test_assert.status(r, 401) - assert res['detail'] == ""Invalid token."" + # Non-admin check for Un-Protect a mc Deployment by different user. 
+ deploy_id = custom_lib_admin_operations + r = run_api.deploy_protect(deploy_id) + r = run_api.deploy_unprotect(deploy_id) + test_assert.status(r, 403) " -/user/rest/add-group/{id}/,requesting to add user to existing group using valid id but without authorization,"{ -user_id = ""valid"" , -group_names = ""valid_group_list"" -}","{ -""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -}","def test_user_add_group_without_authorization(anonymous_exec_api): +/deploy/rest/unprotect/,unprotecting the machine deployment by manager who does not have rights over the server,,,"@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_deploy_unprotect_manager_no_server_right(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): """""" - Adding user into group without authorization + Un-Protect a mc Deployment """""" - groups_name = { - 'groups': ['0'] - } - template, r = anonymous_exec_api.user_add_group(user_id=0, groups=groups_name) - res = r.json() - test_assert.status(r, 401) - assert res['detail'] == ""Authentication credentials were not provided."" + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = custom_lib_admin_operations + r = run_api.deploy_protect(deploy_id) + r = run_api.deploy_unprotect(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=False)) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + deploy_id = custom_lib_non_admin_operations + r = run_api.deploy_protect(deploy_id) + r = run_api.deploy_unprotect(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) + + " 
-/user/rest/add-group/{id}/,requesting to add user to group using invalid user_id.Check the user type before performing the operation.,"{ -user_id = ""invalid"" , -group_names = ""valid_group_list"" +/deploy/rest/unprotect/,unprotecting the machine deployment using deploy id for which deployment does not exist,"{ +deploy_id : ""invalid"" }","{ -""status"" : 403 / 404 -}","PARAMETERS = [{""action"": GROUP_ADD}] - -@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) -def test_user_add_group_invalid_user_id(run_api, custom_group_admin_operations): + ""status"" : 404, + ""message"" : ""Deployed Machine Unprotect: Unable to find deployment"" +}","def test_deploy_unprotect_invalid_uuid(run_api): """""" - Adding invalid user id into group + Un-Protect a invalid mc Deployment """""" - params, r = custom_group_admin_operations + deploy_id = ""invalid"" + r = run_api.deploy_unprotect(deploy_id) res = r.json() - group_name = { - 'groups': [res['name']] - } - template, r = run_api.user_add_group(user_id=0, groups=group_name) - if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: - test_assert.status(r, 403) - elif run_api.user_type == USER_TYPE[""admin""]: - test_assert.status(r, 404) + test_assert.status(r, 404) + assert res[""result""] == 'FAILURE', res + assert ""DeployedMachineUnprotect: Unable to find deployment"" in res[""error""], res " -/user/rest/add-group/{id}/,"requesting to add user to group using valid user_id where group name provided is an integer instead of string. Check the user type before performing the operation. -","{ -user_id = ""valid"" , -group_names = ""invalid_group_list_datatype"" -}","{ -""status"" : 403 / 400, -""response"" : ""Provided group names must be a list of strings"" -}","def test_user_add_group_invalid_data(run_api, admin_exec_api): +/deploy/rest/unprotect/,"unprotecting the machine deployment where the deployment is a part of island. 
Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ + ""status"" : 400, + ""message"" : ""UnProtecting deployment which is part of island is not allowed"" +}"," +def test_deploy_unprotect_island_machine_uuid(run_api, ideploy_details): """""" - Provide integer instead of string in group name list + Un-Protect a mc Deployment which is part of an island """""" - groups_name = { - 'groups': [1] - } - user_result = admin_exec_api.user_list() - res = user_result.json() - user_ids = [result['id'] for result in res['results']] - user_id = random.choice(user_ids) - template, r = run_api.user_add_group(user_id, groups_name) - if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: - test_assert.status(r, 403) - elif run_api.user_type == USER_TYPE[""admin""]: - res = r.json() - assert res['error'] == ""Provided groups must be a list of Group's UUIDs."", ""|> Json %s"" % res - test_assert.status(r, 400) - + param, result = ideploy_details + x = result.json() + machine_deploy_id = x[""machines""][0][""uuid""] + r = run_api.deploy_unprotect(machine_deploy_id) + res = r.json() + test_assert.status(r, 400) + assert res[""result""] == 'FAILURE', res + assert ""UnProtecting deployment which is part of island is not allowed"" in res[""error""], res " -/user/rest/add-group/{id}/,"requesting to add user to non-existing group. Check the user type before performing the operation. +/group/rest/add-ldap-user/,"adding ldap user to the group using group id that does not exist. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
","{ -user_id = ""valid"" , -group_names = ""invalid_group_list"" +group_id : 0 }","{ -""status"" : 403 / 404, -""message"" : ""Group matching query does not exist."" -}","def test_user_add_group_invalid_grp_name(run_api, admin_exec_api): + ""status"" : 400, + ""message"" : ""Group does not exist"" +}","def test_group_add_ldap_user_invalid_group_id(run_api): """""" - Adding user into invalid group name + group add ldap user invalid group id """""" - groups_name = { - 'groups': ['0'] - } - user_result = admin_exec_api.user_list() - res = user_result.json() - user_ids = [result['id'] for result in res['results']] - user_id = random.choice(user_ids) - template, r = run_api.user_add_group(user_id, groups_name) + group_id = 0 + r = run_api.group_add_ldap_user(group_id, params={""users_list"": []}) if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: test_assert.status(r, 403) elif run_api.user_type == USER_TYPE[""admin""]: - res = r.json() - assert res['error'] == ""Provided groups must be a list of Group's UUIDs."" test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""Group does not exist"", ""|> Json %s"" % rjson " -/user/rest/change_ownership/,"changing ownership of user where the owner and destination user are the same. Check the user type before performing the operation, only admin user type have the permission to perform such operations. -","{ - owner = 'colama' - dest_user = 'colama' -}","{ -""status"" : 400, -""message"" : 'The dest_user and the owner should be different' -} -","def test_user_change_ownership_when_owner_and_dest_user_are_same(run_api): +/group/rest/add-ldap-user/,"adding ldap user to the group without the user list. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+",,"{ + ""status"" : 400 +}","@pytest.mark.parametrize(""custom_group_admin_operations"", [{""action"": GROUP_ADD}], indirect=True) +def test_group_add_ldap_userwithout_user_list(custom_group_admin_operations, run_api): """""" - user change_ownership when owner and dest user are same + Group add ldap user with out user list """""" - owner = 'colama' - dest_user = 'colama' - r = run_api.user_change_ownership(owner, dest_user) - if run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: - rjson = r.json() - test_assert.status(r, 400) - assert rjson['error'] == 'The dest_user and the owner should be different', ""|> Json %s"" % rjson - elif run_api.user_type == USER_TYPE[""non_admin""]: + params, r = custom_group_admin_operations + group_id = r.json()['id'] + r = run_api.group_add_ldap_user(group_id, params={}) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 400) rjson = r.json() - assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson" -/user/rest/change_ownership/,"changing ownership of user where the owner is valid but destination user does not exist. Check the user type before performing the operation, only admin user type have the permission to perform such operations. + assert rjson['error'] == ""'users_list'"", ""|> Json %s"" % rjson +" +/group/rest/promote-to-manager/,"promoting a normal user to manager.Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+",,"{ + ""status"" : 202 +}","@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) +def test_group_promote_to_manager(run_api, custom_group_admin_operations): + """""" + Promote from Normal user to Manager + """""" + params, r = custom_group_admin_operations + group_id = params[""group_id""] + users_list = params[""users_list""] + ret = run_api.promote_to_manager(group_id, params={""users_list"": users_list}) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(ret, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(ret, 202) +" +/group/rest/promote-to-manager/,"promoting a normal user to manager using invalid group id.Check the user type before performing the operation, only admin user type have the permission to perform such operations. ","{ - owner = 'colama' - dest_user = 'doesnotexistuser' + group_id = 0 + users_list = [0] }","{ -""status"" : 400, -""message"" : ""Either User owner or dest_user does not exist..."" -} -","def test_user_change_ownership_user_doesnot_exits(run_api): + ""status"" : 400, + ""message"" : ""Group does not exist."" +}","def test_group_promote_to_manager_invaild_group_id(run_api): """""" - user does not exits + invalid group id """""" - owner = 'colama' - dest_user = 'doesnotexistuser' - r = run_api.user_change_ownership(owner, dest_user) - if run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: - rjson = r.json() - test_assert.status(r, 400) - assert rjson['error'] == f""Either User '{owner}' or '{dest_user}' does not exist..."", ""|> Json %s"" % rjson - elif run_api.user_type == USER_TYPE[""non_admin""]: + group_id = 0 + users_list = [0] + r = run_api.promote_to_manager(group_id, params={""users_list"": users_list}) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 400) rjson = r.json() 
- assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson
+ assert rjson['error'] == 'Group does not exist', ""|> Json %s"" % rjson
 "
-/user/rest/change_ownership/,"changing ownership of user where the destination user does not have right over the owner. Check the user type before performing the operation, only admin user type have the permission to perform such operations.
+/group/rest/promote-to-manager/,"promoting a normal user to manager using invalid user id. Check the user type before performing the operation, only admin user type have the permission to perform such operations.
 ","{
- owner = 'colama'
- dest_user = 'manager'
+ group_id,
+ users_list = [0]
 }","{
-""status"" : 400,
-""message"" : ""'manager' as a Manager user, does not have right over 'colama' or 'manager'""
-}
-","def test_user_change_owner_doesnot_have_right(skip_if_admin, run_api):
+ ""status"" : 207,
+ ""message"" : ""User [0] isn't part of the group""
+}","@pytest.mark.parametrize(""custom_group_admin_operations"", [{""action"": GROUP_ADD}], indirect=True)
+def test_group_promote_to_manager_invaild_user_id(custom_group_admin_operations, run_api):
 """"""
- user does not have right over user
+ User does not exits
 """"""
- owner = 'colama'
- dest_user = 'manager'
- r = run_api.user_change_ownership(owner, dest_user)
- if run_api.user_type == USER_TYPE[""manager""]:
- rjson = r.json()
- test_assert.status(r, 400)
- assert rjson['error'] == ""'manager' as a Manager user, does not have right over 'colama' or 'manager'"", ""|> Json %s"" % rjson
- elif run_api.user_type == USER_TYPE[""non_admin""]:
+ params, r = custom_group_admin_operations
+ group_id = r.json()['id']
+ group_name = params['name']
+ users_list = [0]
+ r = run_api.promote_to_manager(group_id, params={""users_list"": users_list})
+ if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]:
 test_assert.status(r, 403)
+ elif run_api.user_type == USER_TYPE[""admin""]:
+ 
test_assert.status(r, 207) rjson = r.json() - assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson + assert rjson['failure'][0]['error'] == f""User [0] isn't part of the group [{group_name}]"", ""|> Json %s"" % rjson + + " -/user/rest/change_ownership/,"changing ownership of a user by a manager , where the manager does not have rights over the users. Check the user type before performing the operation, only admin user type have the permission to perform such operations. -","{ -owner = ""vivekt"" -dest_user = ""manager"" -}","{ -""status"" : 400, -""message"":""Manager doesn't have full right over the user. Make sure 'vivekt' doesn't have any deployment on the server that the 'manager' user as Manager doesn't handle"" -}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) -def test_user_change_ownership_manager_does_not_have_deployment_server(skip_if_not_manager, run_api, custom_lib_non_admin_operations): +/group/rest/promote-to-manager/,"promoting a user to manager, where the user already has the managerial rights.",{,"{ + ""status"" : 207, + ""message"" :""User already has 'Manager' rights"" +}","@pytest.mark.parametrize(""custom_group_admin_operations"", [{""action"": GROUP_ADD_MANAGER_AS_MANAGER}], indirect=True) +def test_group_promote_to_manager_who_has_already_manager_rights(skip_if_not_manager, custom_group_admin_operations, admin_exec_api): """""" - To test user_change_ownership endpoint when manager does not have full rights over the user + Group promote to manager who has already manager rights """""" - _ = custom_lib_non_admin_operations - owner = ""vivekt"" - dest_user = ""manager"" - res = run_api.user_change_ownership(owner, dest_user) - test_assert.status(res, 400) - rjson = res.json() - assert rjson['error'] == ""Manager doesn't have full right over the user. 
Make sure 'vivekt' doesn't have any deployment on the server that the 'manager' user as Manager doesn't handle"", ""|> Json %s"" % rjson + params, r = custom_group_admin_operations + group_id = params['group_id'] + users_list = params['users_list'] + r = admin_exec_api.promote_to_manager(group_id, params={""users_list"": users_list}) + test_assert.status(r, 207) + rjson = r.json() + assert rjson['failure'][0]['error'] == ""User already has 'Manager' rights"", ""|> Json %s"" % rjson + " -/user/rest/detail/{id},"fetching the details of user. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +/group/rest/promote-to-manager/,"promoting a normal user to manager without providing the user list. Check the user type before performing the operation, only admin user type have the permission to perform such operations. ",,"{ -""status"" : 403 / 200 -}","def test_user_details(run_api, user_details): + ""status"" : 400 +}","@pytest.mark.parametrize(""custom_group_admin_operations"", [{""action"": GROUP_ADD}], indirect=True) +def test_group_promote_to_manager_without_user_list(custom_group_admin_operations, run_api): """""" - Fetching the Details of User + Group promote to manager with out user list """""" - params, r = user_details - res = r.json() + params, r = custom_group_admin_operations + group_id = r.json()['id'] + r = run_api.promote_to_manager(group_id, params={}) if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: test_assert.status(r, 403) elif run_api.user_type == USER_TYPE[""admin""]: - test_assert.status(params, res, ""user_details"") - test_assert.status(r, 200) + test_assert.status(r, 400) + rjson = r.json() + assert rjson['error'] == ""'users_list'"", ""|> Json %s"" % rjson " -/user/rest/detail/{id},fetching the details of user using invalid id,"{ -user_id = ""invalid"" -}","{ -""status"" : 404 -}","def test_user_detail_with_invalid_id(run_api): 
+/group/rest/update/,"updating a group successfully. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ + ""status"" : 202 +}","@pytest.mark.parametrize(""custom_group_admin_operations"", PARAMETERS, indirect=True) +def test_group_update(skip_if_manager, run_api, custom_group_admin_operations): """""" - Fetching the details using invalid id - + Update group """""" - params, r = run_api.user_details(id=""invalid"") - test_assert.status(r, 404) + template, r = custom_group_admin_operations + if run_api.user_type == USER_TYPE[""non_admin""]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + result = r.json() + test_assert.status(template, result, ""group_update"") + test_assert.status(r, 202) +" +/group/rest/update/,updating a group where the manager is part of that group.,,,"@pytest.mark.parametrize(""custom_group_admin_operations"", [{""action"": GROUP_ADD_MANAGER_AS_MANAGER}], indirect=True) +def test_group_update_as_manager_of_the_group(skip_if_not_manager, run_api, custom_group_admin_operations): + """""" + Update group for which the manager is part of + """""" + # if not run_api.user_type != USER_TYPE[""manager""]: + # pytest.skip(""skipped"") + params, r = custom_group_admin_operations + params, r = run_api.group_update(params[""group_id""]) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True)) +" +/group/rest/update/,updating a group where the manager is not part of that group.,,,"@pytest.mark.parametrize(""custom_group_admin_operations"", [{""action"": GROUP_ADD_MANAGER_AS_NORMAL}], indirect=True) +def test_group_update_as_non_manager_of_the_group(skip_if_not_manager, run_api, custom_group_admin_operations): + """""" + Update group for which the manager is not part of + """""" + # if not run_api.user_type != USER_TYPE[""manager""]: + # pytest.skip(""skipped"") + params, r = custom_group_admin_operations + params, r = 
run_api.group_update(params[""group_id""]) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False)) + " -/user/rest/detail/{id},fetching the details of user using invalid token,,"{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_user_details_with_invalid_token(invalid_exec_api): +/group/rest/update/,updating a group by a manager,,," +@pytest.mark.parametrize(""custom_group_admin_operations"", [{""action"": GROUP_ADD_MANAGER_AS_MANAGER}], indirect=True) +def test_group_update_as_manager(skip_if_not_manager, custom_group_admin_operations, run_api): """""" - Fetching the details of the user using invalid token + Group Update by Manager """""" - params, result = invalid_exec_api.user_details() - r = result.json() - test_assert.status(result, 401) - assert r['detail'] == ""Invalid token."" + params, r = custom_group_admin_operations + params, r = run_api.group_update(params[""group_id""]) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True)) " -/user/rest/detail/{id},"fetching the details of user using valid id. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +/group/rest/update/,"updating a group by providing empty group name. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
","{ -user_id = valid_user_id -}","{ -""status"" : 403 / 200 -}","def test_user_detail_with_valid_id(run_api): + ""name"": """", + ""deployment_strategy"": ""roundrobin"" + }","{ + ""status"" : 400, + ""message"" : ""Group Name is required and it can not be blank"" +}","def test_group_update_blank_name(skip_if_not_admin, group_add, run_api): """""" - Fetching the Details of User with valid id + update blank group name """""" + params, r = group_add + group_id = r.json()['id'] + group_param = { + ""name"": """", + ""deployment_strategy"": ""roundrobin"" + } + updated_param, r = run_api.group_update(group_id, group_param) if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: - params, res = run_api.user_details(id=run_api.user_id) - test_assert.status(res, 403) - + test_assert.status(r, 403) elif run_api.user_type == USER_TYPE[""admin""]: - params, res = run_api.user_details(id=run_api.user_id) - test_assert.status(res, 200) + result = r.json() + test_assert.status(r, 400) + assert result['error'] == ""Group Name is required and it can not be blank"" +" +/ideploy/rest/hostonly_info_file/,fetch hostonly information file from island deployment,,"{ + ""status"" : 200 +}","def test_ideploy_hostonly_info_file(run_api): + """""" + ideploy hostonly info file + """""" + r = run_api.ideploy_hostonly_info_file() + test_assert.status(r, 200) +" +/ideploy/rest/shutdown/,shutting down the island deployment,,"{ + ""status"" : 201 +}","def test_ideploy_shutdown_self(ideploy_shutdown): + """""" + Shutting down the Island + """""" + r = ideploy_shutdown + test_assert.status(r, 201) +" +/ideploy/rest/shutdown/,shutting down the island deployment by admin,,"{ + ""status"" : 201 +}","@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_shutdown_admin(skip_if_not_admin, custom_ilib_non_admin_operations, run_api): + """""" + Shutting down the Island by Admin + """""" + # Admin check of Starting a deployment 
created by different user + deploy_id = custom_ilib_non_admin_operations + r = run_api.ideploy_shutdown(deploy_id) + test_assert.status(r, 201) " -/user/rest/detail/{id},fetching the details of user without authorization,,"{ -""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -}","def test_user_detail_without_token(anonymous_exec_api): +/ideploy/rest/shutdown/,shutting down the island deployment by non-admin,,"{ + ""status"" : 403 +}","@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS, indirect=True) +def test_ideploy_shutdown_non_admin(skip_if_not_non_admin, custom_ilib_admin_operations, run_api): """""" - Fetching the user details without token + Shutting down the Island by non-admin """""" - params, result = anonymous_exec_api.user_details() - r = result.json() - test_assert.status(result, 401) - assert r['detail'] == ""Authentication credentials were not provided."" + # Non-admin check of Starting a deployment created by different user + deploy_id = custom_ilib_admin_operations + r = run_api.ideploy_shutdown(deploy_id) + test_assert.status(r, 403) " -/user/rest/list/,"fetching the list of users. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
-",,"{ -""status"" : 200/ 403, -""response"" : fetched list of users -}","def test_user_list(run_api, user_list): +/ideploy/rest/shutdown/,shutting down the island deployment by manager who has rights over the server,,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) +def test_ideploy_shutdown_manager_server_right(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): + """""" + Shutting down the Island by manager when have right on server + """""" + # When the user is not part of the group that the manager manages + deploy_id = custom_ilib_admin_operations + r = run_api.ideploy_shutdown(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False, manages_server=True)) + + # When the user is part of the group that the manager manages and deployment is on manager rights to server + deploy_id = custom_ilib_non_admin_operations + r = run_api.ideploy_shutdown(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=True)) + + +" +/ideploy/rest/shutdown/,shutting down the island deployment by manager who does not have rights over the server,,,"@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +@pytest.mark.parametrize(""custom_ilib_admin_operations"", PARAMETERS_NO_SRV_RIGHT, indirect=True) +def test_ideploy_shutdown_manager_no_server_right(skip_if_not_manager, custom_ilib_admin_operations, custom_ilib_non_admin_operations, run_api): + """""" + Shutting down the Island by manager when have no right on server + """""" + # When the user is not part of the group that the manager manages and the deployment is not on manager rightful server + deploy_id = custom_ilib_admin_operations + r = run_api.ideploy_shutdown(deploy_id) + test_assert.status(r, 
manager_rights_response(endpoint, manages_user=False, manages_server=False)) + + # When the user is part of the group that the manager manages but the deployment is not on manager rightful server + deploy_id = custom_ilib_non_admin_operations + r = run_api.ideploy_shutdown(deploy_id) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True, manages_server=False)) +" +/library/rest/delete/,deleting a library successfully,,"{ + ""status"" : 204 +}","def test_lib_delete(library_delete): + """""" + Deleting the Library + """""" + x, r = library_delete + test_assert.status(r, 204) +" +/library/rest/delete/,deleting a library by admin,,"{ + ""status"" : 204 +}","@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +def test_lib_delete_admin(skip_if_not_admin, custom_lib_non_admin_operations, run_api): + """""" + Deleting the Library by Admin + """""" + # Admin check for deleting the Library created by different user. + lib_id = custom_lib_non_admin_operations + r = run_api.library_delete(lib_id, {}) + test_assert.status(r, 204) +" +/library/rest/delete/,deleting a library by non-admin,,"{ + ""status"" : 403 +}","@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_lib_delete_non_admin(skip_if_not_non_admin, custom_lib_admin_operations, run_api): """""" - Fetching the List of User + Deleting the Library by non-Admin """""" - r = user_list - if run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: - test_assert.status(r, 200) - elif run_api.user_type == USER_TYPE[""non_admin""]: - test_assert.status(r, 403) + # Non-admin check for deleting the Library created by different user. + lib_id = custom_lib_admin_operations + r = run_api.library_delete(lib_id, {}) + test_assert.status(r, 403) " -/user/rest/list/,"fetching the list of users setting the is_manager param set to False. 
Check the user type before performing the operation, only admin user type have the permission to perform such operations. -",,"{ -""status"" : 200/ 403, -} ","def test_user_list_is_manager_is_false(run_api): +/library/rest/delete/,deleting a library by manager,,,"@pytest.mark.parametrize(""custom_lib_non_admin_operations"", PARAMETERS, indirect=True) +@pytest.mark.parametrize(""custom_lib_admin_operations"", PARAMETERS, indirect=True) +def test_lib_delete_manager(skip_if_not_manager, custom_lib_admin_operations, custom_lib_non_admin_operations, run_api): """""" - fetch user list when is_manager is false + Delete the Library by Manager """""" - params = { - 'is_manager': False - } - r = run_api.user_list(params) - rjson = r.json() - if run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: - test_assert.status(r, 200) - for users in rjson['results']: - is_manager = [group[""is_manager""] for group in users['groups']] - assert False in is_manager, ""The error is %s"" % rjson - elif run_api.user_type == USER_TYPE[""non_admin""]: - test_assert.status(r, 403) - assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson + # When the user is not part of the group that the manager manages + lib_id = custom_lib_admin_operations + r = run_api.library_delete(lib_id, {}) + test_assert.status(r, manager_rights_response(endpoint, manages_user=False)) + + # When the user is part of the group that the manager manages + lib_id = custom_lib_non_admin_operations + r = run_api.library_delete(lib_id, {}) + test_assert.status(r, manager_rights_response(endpoint, manages_user=True)) " -/user/rest/list/,"fetching the list of users setting the is_manager param set to True. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
-",,"{ -""status"" : 200/ 403, -} ","def test_user_list_is_manager_is_true(run_api): +/library/rest/delete/,deleting the full tree of library,,"{ + ""status"" : 204 +}","def test_lib_delete_full_tree(run_api): """""" - fetch user list when is_manager is true + deleting library """""" - params = { - 'is_manager': True - } - r = run_api.user_list(params) + params, r = run_api.library_add_new_vm() rjson = r.json() - if run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: - test_assert.status(r, 200) - for users in rjson['results']: - is_manager = [group[""is_manager""] for group in users['groups']] - assert True in is_manager, ""The error is %s"" % rjson - elif run_api.user_type == USER_TYPE[""non_admin""]: - test_assert.status(r, 403) - assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson + uuid_lib = rjson[""uuid""] + r = run_api.deploy_image(uuid_lib) + depl_uuid = r.json()[""uuid""] + r = run_api.deploy_snapshot(depl_uuid) + snap_uuid = r.json()[""snapshotted_machine_uuid""] + run_api.deploy_image_delete(depl_uuid, {}) + r = run_api.library_delete(snap_uuid, params={""full_tree"": ""true""}) + test_assert.status(r, 204) " -/user/rest/list/,"fetching the list of users using filters. Check the user type before performing the operation. 
-",,"{ -""status"" : 200/ 403, -} ","@pytest.mark.xfail -def test_user_list_filter(skip_if_invalid_groups, run_api, user_list): +/library/rest/delete/,failing to delete the full tree of library,,"{ + ""status"" : 400 +}","def test_lib_delete_full_tree_failure(run_api): """""" - Fetching the List of User by filtering + deleting library failure """""" - groups = skip_if_invalid_groups - group_filter = {""group_id"": choice(groups), ""page_size"": 10} - exclude_group_filter = {""exclude_group_id"": choice(groups), ""page_size"": 10} - is_manager_filter = {""is_manager"": choice([True, False]), ""page_size"": 10} - r = user_list - if run_api.user_type == USER_TYPE[""non_admin""]: - test_assert.status(r, 403) - elif run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: - # expected result of users with exclude group filter - exclude_group_ids = [] - for i in r.json()[""results""]: - group_ids = [] - for j in i.get(""groups""): - group_ids.append(j.get(""id"")) - if exclude_group_filter.get(""exclude_group_id"") in group_ids and len(group_ids) - 1 >= 1: - exclude_group_ids.append(i.get(""id"")) - # expected result of users with is_manager filter - manager_check = [] - for i in r.json()[""results""]: - is_manager = [] - for j in i.get(""groups""): - is_manager.append(j.get(""is_manager"")) - if is_manager_filter.get(""is_manager"") is True and is_manager_filter.get(""is_manager"") in is_manager: - manager_check.append(True) - elif is_manager_filter.get(""is_manager"") is False and True not in is_manager: - manager_check.append(False) - exp_res = { - 0: [group_filter.get(""group_id"") for i in r.json()[""results""] for j in i.get(""groups"") if j.get(""id"") == group_filter.get(""group_id"")], - 1: exclude_group_ids, - 2: manager_check - } - filters = [group_filter, exclude_group_filter, is_manager_filter] - for filter in range(len(filters)): - params = filters[filter] - r = run_api.user_list(params) - # check for valid response data with the filter 
parameters - if r.json()[""count""] != len(exp_res[filter]): - logging.error(f""error in filter: {filters[filter]}, the list of expected result for the filter is: {exp_res[filter]}, and the actual result is {r.json()}"") - assert False - test_assert.status(r, 200) + params, r = run_api.library_add_new_vm() + rjson = r.json() + uuid_lib = rjson[""uuid""] + r = run_api.deploy_image(uuid_lib) + depl_uuid = r.json()[""uuid""] + r = run_api.deploy_snapshot(depl_uuid) + snap_uuid = r.json()[""snapshotted_machine_uuid""] + r = run_api.library_delete(snap_uuid, params={""full_tree"": ""true""}) + test_assert.status(r, 400) + run_api.deploy_image_delete(depl_uuid, {}) + run_api.library_delete(snap_uuid, params={""full_tree"": ""true""}) " -/user/rest/list/,"fetching the list of users using the group_id parameter. Check the user type before performing the operation, only admin user type have the permission to perform such operations. -",,"{ -""status"" : 200/ 403, -""response"" : fetched list of users with the provided group_id -} ","def test_user_list_with_group_id(run_api, admin_exec_api): +/library/rest/delete/,deleting library when its deployment exist,,"{ + ""status"" : 400 +}","def test_lib_delete_with_deployment_exists(run_api, library_add_new_vm): """""" - Fetch user list in a group with group-id + When uuid exists and it has next revision/ deployement exists """""" - params, r = admin_exec_api.group_add() - group_uid = r.json()[""id""] - if run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: - res = run_api.user_list(params={""group_id"": group_uid}) - test_assert.status(res, 200) - elif run_api.user_type == USER_TYPE[""non_admin""]: - res = run_api.user_list(params={""group_id"": group_uid}) - test_assert.status(res, 403) - r = admin_exec_api.group_delete(group_uid) + params, r = library_add_new_vm + lib_id = r[""uuid""] + deploy = run_api.deploy_image(lib_id) + r = run_api.library_delete(lib_id, {}) + test_assert.status(r, 400) + deployjson = 
deploy.json() + run_api.deploy_image_delete(deployjson['uuid'], {}) " -/user/rest/list/,"fetching the list of users using the search param . Check the user type before performing the operation, only admin user type have the permission to perform such operations. -",,"{ -""status"" : 200/ 403, -""response"" : fetched list of users when search param provided -}","def test_user_list_with_search_params(run_api, user_list): +/library/rest/delete/,deleting library using uuid that does not exist,"{ +lib_id : ""invalid"" +}","{ + ""status"" : 404 +}","def test_lib_delete_with_invalid_uuid(run_api): """""" - user list with search params + When machine uuid doesnot exist """""" - if run_api.user_type in [USER_TYPE[""admin""], USER_TYPE[""manager""]]: - result = run_api.user_list(params={""search"": run_api.user}) - test_assert.status(result, 200) - elif run_api.user_type == USER_TYPE[""non_admin""]: - result = run_api.user_list(params={""search"": run_api.user}) - test_assert.status(result, 403) + lib_id = ""invalid"" + ret = run_api.library_delete(lib_id) + test_assert.status(ret, 404) " -/user/rest/list/,fetching the list of users with invalid token,,"{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_user_list_with_invalid_token(invalid_exec_api): +/library/rest/delete/,deleting library without authorization,"{ +lib_id : ""wrong"" +}","{ + ""status"" : 401, + ""message"" : ""Authentication credentials were not provided."" +}","def test_lib_delete_without_authorization(anonymous_exec_api): """""" - Fetch group list with invalid token + without authorization """""" - r = invalid_exec_api.group_list({}) - res = r.json() - test_assert.status(r, 401) - assert res['detail'] == ""Invalid token."" + lib_id = 'wrong' + ret = anonymous_exec_api.library_delete(lib_id) + test_assert.status(ret, 401) + rjson = ret.json() + assert rjson['detail'] == ""Authentication credentials were not provided."", ""|> The Error is {}"".format(rjson['detail']) + + " 
-/user/rest/list/,fetching the list of users without authorization,,"{ -""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -}","def test_user_list_without_token(anonymous_exec_api): +/library/rest/delete/,deleting library with invalid token,"{ +lib_id : ""wrong"" +}","{ + ""status"" : 401, + ""message"" : ""Invalid Token"" +}","def test_lib_delete_with_invalid_token(invalid_exec_api): """""" - Fetch group list with unauthorized + with invalid token """""" - r = anonymous_exec_api.group_list({}) - res = r.json() - test_assert.status(r, 401) - assert res['detail'] == ""Authentication credentials were not provided."" + lib_id = 'wrong' + ret = invalid_exec_api.library_delete(lib_id) + test_assert.status(ret, 401) + rjson = ret.json() + assert rjson['detail'] == ""Invalid token."", ""|> The Error is {}"".format(rjson['detail']) " -/user/rest/logout,requesting to logout user,,"{ -""status"" : 200, -""response"" : user logged out successfully -}","def test_user_logout(user_logout): +/library/rest/library_filter_fields,fetching library filter fields,,"{ + ""status"" : 200 +}","def test_library_filter_fields(run_api): """""" - Logout the user + fetch library filter fields """""" - r = user_logout + r = run_api.library_filter_fields() test_assert.status(r, 200) " -/user/rest/logout,requesting to logout user using invalid token,,"{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_user_logout_with_invalid_token(invalid_exec_api): +/library/rest/import_ova/,importing ova from library,,"{ + ""status"" : 201 +}","def test_library_import_ova(library_import_ova): """""" - Logout the user + Deploy image """""" - res = invalid_exec_api.user_logout() - result = res.json() - test_assert.status(res, 401) - assert result['detail'] == ""Invalid token."" + r = library_import_ova + test_assert.status(r, 201) " -/user/rest/logout,requesting to logout user without authorization,,"{ -""status"" : 401, -""message"" : ""Authentication credentials 
were not provided"" -}","def test_user_logout_without_authorization(anonymous_exec_api): +/library/rest/segmentlist/,fetching library revisions,,"{ + ""status"" : 200 +}","def test_library_revisions(library_revisions): """""" - Logout the user + fetching library revisions """""" - r = anonymous_exec_api.user_logout() - result = r.json() - test_assert.status(r, 401) - assert result['detail'] == ""Authentication credentials were not provided."" + params, r = library_revisions + test_assert.status(r, 200) " -/user/rest/self/,fetching the data of logged in user,,200: should return object of user currently logged in,"def test_user_self(user_self): +/library/rest/segmentlist/,fetching library segment list,,"{ + ""status"" : 200 +}","def test_library_segmentlist(library_segmentlist): """""" - Fetching the data of logged in user + fetch segment list """""" - r = user_self + params, r = library_segmentlist test_assert.status(r, 200) " -/user/rest/self/,fetching the data of logged in user using invalid token,,"{ -""status"" : 401, -""message"" : ""Invalid token"" -}","def test_user_self_with_invalid_token(invalid_exec_api): +/library/rest/segmentlist/,"fetching library segment list using ""nic_type"" parameter","{ +'nic_type': 'bridge' +}",,"def test_library_segment_with_nic_type(library_add_new_vm, run_api): """""" - Fetching the data of logged in user with invalid token + Fetch library segment with nic type """""" - r = invalid_exec_api.user_self() - result = r.json() - test_assert.status(r, 401) - assert result['detail'] == ""Invalid token."" + p, res = library_add_new_vm + params = {'nic_type': 'bridge'} + r1 = run_api.library_segmentlist(params).json() + for segment in r1['results']: + assert segment['network_type'] == 'public' + params = {'nic_type': 'host'} + r2 = run_api.library_segmentlist(params).json() + for segment in r2['results']: + assert segment['network_type'] == 'hostOnly' " -/user/rest/self/,fetching the data of logged in user without authorization,,"{ 
-""status"" : 401, -""message"" : ""Authentication credentials were not provided"" -}","def test_user_self_without_authorization(anonymous_exec_api): +/library/rest/segmentlist/,"fetching library segment list using ""search"" parameter","{ +'search': 'host' +}",,"def test_library_segmentlist_with_search_param(library_add_new_vm, run_api): + """""" + fetch segmentlist with search params + """""" + p, r = library_add_new_vm + params = {'search': 'host'} + r = run_api.library_segmentlist(params).json() + for segment in r['results']: + segment['network_type'] == 'hostOnly' +" +/library/rest/segmentlist/,"fetching library segment list using ""network_type"" parameter","{ +'network_type': 'hostOnly' +}",,"def test_library_segmentlist_with_network_type(library_add_new_vm, run_api): """""" - Fetching the data of logged in user without authorization + fetch segmentlist with network type """""" - r = anonymous_exec_api.user_self() - result = r.json() - test_assert.status(r, 401) - assert result['detail'] == ""Authentication credentials were not provided."" + p, r = library_add_new_vm + params = {'network_type': 'hostOnly'} + r = run_api.library_segmentlist(params).json() + for segment in r['results']: + segment['network_type'] == 'hostOnly' " -audit/rest/list,getting the audit list without authorization,,"{""status"":401, -""message"":""Authentication credentials were not provided."" -}","def test_audit_list_without_authorization(anonymous_exec_api): +/library/rest/segmentlist/,"fetching library segment list using ""nic_type"" parameter","{ +'scope' +}","{ + ""status"" : 200 +}","def test_library_segmentlist_with_scope(library_add_new_vm, run_api): """""" - Audit list without authorization + fetch segmentlist with scope """""" - r = anonymous_exec_api.audit_list() - result = r.json() - test_assert.status(r, 401) - assert result['detail'] == ""Authentication credentials were not provided.""" -ideploy/rest/add-tags,"empty input data, expecting an error for missing required 
fields. Check the user type before performing the operation, only admin user type have the permission to perform such operations. -",,"{""status"": 400, ""message"": ""Input data is missing required 'island_list' and 'tags_list' keys.""}","def test_ideploy_change_ownership_with_missing_fields(run_api): + p, r = library_add_new_vm + params = {'scope': 'my'} + r = run_api.library_segmentlist(params).json() + for segment in r['results']: + segment['network_type'] in ['hostOnly', 'public'] + + # invalid scope + params2 = {'scope': 'invalid'} + r2 = run_api.library_segmentlist(params2) + test_assert.status(r2, 200) +" +/library/rest/upload_nvram/,uploading NVRam to library,,"{ + ""status"" : 200 +}","def test_library_upload_nvram(skip_if_aarch64, library_add_new_vm, run_api): """""" - change ownership with missing 'owner' field + upload nvram """""" - params = { - ""deployment_uuids"": [ - ""invalid"" - ], - ""dest_user"": ""manager"" - } - r = run_api.ideploy_change_ownership(params) - if run_api.user_type == USER_TYPE['non_admin']: - test_assert.status(r, 403) - rjson = r.json() - assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson - else: - test_assert.status(r, 400) - rjson = r.json() - assert rjson['error'] == ""owner or dest_user cannot be null or empty"", ""|> Json %s"" % rjson + p, r = library_add_new_vm + lib_id = r['uuid'] + r = run_api.library_upload_nvram(lib_id,) + test_assert.status(r, 200) " -ideploy/rest/add-tags,"providing empty island_list and non-empty tags_list, expecting an error for not enough islands.","{""island_list"": [], ""tags_list"": [[""tag1""]]}","{""status"": 400, ""message"": ""Not enough islands to add tags to.""}","def test_ideploy_add_tags_empty_island_list(run_api): +/library/rest/upload_nvram/,uploading NVRam to deployed machine of library,,"{ + ""status"" : 400, + ""message"" :""Upload of nvram is not allowed to a deployed machine."" +}","def 
test_library_upload_nvram_on_deploy_machine(deploy_image, run_api): """""" - invalid id + Upload Nvram on deployed machine """""" - params = { - ""island_list"": [ - ], - ""tags_list"": [ - [ - ""tag_name"" - ] - ] - } - r = run_api.ideploy_add_tag(params) + p, res = deploy_image + lib_id = res.json()['machine']['uuid'] + r = run_api.library_upload_nvram(lib_id,) test_assert.status(r, 400) rjson = r.json() - assert rjson['error'] == ""{'island_list': [ErrorDetail(string='This list may not be empty.', code='empty')]}"", ""|> Json %s"" % rjson + assert rjson['error'] == ""Upload of nvram is not allowed to a deployed machine."", ""|> json %s"" % rjson + + " -ideploy/rest/add-tags,"providing invalid UUID in island_list, expecting an error for invalid island UUID.","{""island_list"": [""invalid_UUID""], ""tags_list"": [[""tag1""]]}","{""status"": 400, ""message"": ""Invalid island UUID.""}"," -def test_ideploy_add_tags_invalid_island_id(run_api): +/library/rest/upload_nvram/,uploading NVRam to library using invalid lib uuid,"{ +lib_id : ""invalid"" +}","{ + ""status"" : 404, + ""message"" : ""Machine does not exist"" +}","def test_library_upload_nvram_invalid_lib_uuid(run_api): """""" - invalid id + upload nvram with invalid uuid """""" - params = { - ""island_list"": [ - ""inUUID"" - ], - ""tags_list"": [ - [ - ""tag_name"" - ] - ] - } - r = run_api.ideploy_add_tag(params) - test_assert.status(r, 400) + lib_id = ""invalid"" + r = run_api.library_upload_nvram(lib_id,) + test_assert.status(r, 404) rjson = r.json() - assert rjson['error'] == ""DeployedIsland matching query does not exist."", ""|> Json %s"" % rjson + assert rjson['error'] == ""Machine does not exist"", ""|> json %s"" % rjson + " -ideploy/rest/add-tags,providing more number of islands than tags and eventually expecting an error of not enough tags provided,"{""island_list"": [""UUID1"", ""UUID2"", ""UUID3""], ""tags_list"": [[""tag1""], [""tag2""]]}","{""status"": 400, ""message"": ""Not enough tags 
provided.""}","def test_ideploy_add_tags_more_tag_count(run_api): +/library/rest/upload_nvram/,uploading NVRam to library using incompatible NVRam template,"{ +template=""/opt/infracc/infracc/nvram_templates/AAVMF_VARS.fd"" +}","{ + ""status"" : 400, + ""message"" : ""Cannot choose `/opt/infracc/infracc/nvram_templates/AAVMF_VARS.fd` for `x86_64` Architecture of the Machine"" +}","def test_library_upload_nvram_incompatible_nvram_template(skip_if_aarch64, library_add_new_vm, run_api): """""" - ideploy add tags more than island comut """""" - tag_name = ""test_tag"" - params = { - ""island_list"": [ - ""invalid"" - ], - ""tags_list"": [ - [ - tag_name - ], - [ - tag_name - ] - ] - } - r = run_api.ideploy_add_tag(params) + p, r = library_add_new_vm + lib_id = r['uuid'] + r = run_api.library_upload_nvram(lib_id, template=""/opt/infracc/infracc/nvram_templates/AAVMF_VARS.fd"") test_assert.status(r, 400) rjson = r.json() - assert rjson['error'] == ""Not enough islands to add tags to."", ""|> Json %s"" % rjson - + assert rjson['error'] == ""Cannot choose `/opt/infracc/infracc/nvram_templates/AAVMF_VARS.fd` for `x86_64` Architecture of the Machine"", ""|> json %s"" % rjson " -ideploy/rest/add-tags,"providing more number of tags than islands, expecting an error of not enough islands to add tags","{""island_list"": [""UUID1""], ""tags_list"": [[""tag1""], [""tag2""]]}","{""status"": 400, ""message"": ""Not enough islands to add tags to.""}","def test_ideploy_add_tags_more_tag_count(run_api): +/profile/rest/get/,"getting details of profile. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+",,"{ + ""status"" : 200 +}","def test_profile_details(run_api, profile_details): """""" - ideploy add tags more than island comut + Getting details of profile """""" - tag_name = ""test_tag"" - params = { - ""island_list"": [ - ""invalid"" - ], - ""tags_list"": [ - [ - tag_name - ], - [ - tag_name - ] - ] - } - r = run_api.ideploy_add_tag(params) - test_assert.status(r, 400) - rjson = r.json() - assert rjson['error'] == ""Not enough islands to add tags to."", ""|> Json %s"" % rjson + r = profile_details + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + test_assert.status(r, 200) " -ideploy/rest/add-tags,"providing non-empty island_list and empty tags_list, expecting an error for not enough tags.","{""island_list"": [""UUID1""], ""tags_list"": []}","{""status"": 400, ""message"": ""Not enough tags provided.""}","def test_ideploy_add_tags_empty_island_list(run_api): +/profile/rest/get/,"getting details of profile using invalid id. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ +user_id : 0 +}","{ + ""status"" : 400 / 404 +}","def test_profile_details_with_invalid_id(run_api): """""" - invalid id + Getting details of profile by invalid id """""" - params = {""island_list"": [""UUID1""], - ""tags_list"": [] - } - r = run_api.ideploy_add_tag(params) - test_assert.status(r, 400) - rjson = r.json() - assert rjson['error'] == ""{'island_list': [ErrorDetail(string='This list may not be empty.', code='empty')]}"", ""|> Json %s"" % rjson + r = run_api.profile_details(user_id=0) + if run_api.user_type in [USER_TYPE[""non_admin""], USER_TYPE[""manager""]]: + test_assert.status(r, 403) + elif run_api.user_type == USER_TYPE[""admin""]: + status_code = r.status_code + assert status_code in [400, 404] " -ideploy/rest/add-tags,successful deployment operation when equal number of deployed islands and tags provided ,"{""island_list"": [""UUID1"", ""UUID2""], ""tags_list"": [[""tag1""], [""tag2""]]}","{""status"": 201, ""message"": ""Created""}","def test_ideploy_add_tags(ideploy_deploy, run_api): +/profile/rest/get/,"getting details of profile without authorization. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ +user_id : 0 +}","{ + ""status"" : 401, + ""message"" : ""Authentication credentials were not provided."" +}","def test_profile_details_without_authorization(anonymous_exec_api): """""" - ideploy add tags + Getting details of profile without authorization """""" - p, r = ideploy_deploy - uuid = r.json()['deploy_uuid'] - tag_name = ""test_tag"" - params = { - ""island_list"": [ - uuid - ], - ""tags_list"": [ - [ - tag_name - ] - ] - } - r = run_api.ideploy_add_tag(params) - test_assert.status(r, 201) - island_detail = run_api.ideploy_details(uuid).json() - all_tags = [tag['value'] for tag in island_detail['tags']] - assert tag_name in all_tags, ""|> Json %s"" % island_detail + r = anonymous_exec_api.profile_details(user_id=0) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Authentication credentials were not provided."" " -ideploy/rest/change_ownership,"Attempting to change ownership when the owner and destination user are the same. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +/profile/rest/get/,"getting details of profile using invalid token. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
","{ - ""deployment_UUIDs"": [ - ""string"" - ], - ""owner"": ""colama"", - ""dest_user"": ""colama"" - }","{""status"": 400, ""message"": ""The dest_user and the owner should be different""}","def test_ideploy_change_ownership_same_owner_and_dest_owner(run_api): +user_id : 0 +}","{ + ""status"" : 401, + ""message"" : ""Invalid Token"" +}","def test_profile_details_with_invalid_token(invalid_exec_api): """""" - ideploy change ownership + Getting details of profile with invalid token """""" - params = { - ""deployment_UUIDs"": [ - ""string"" - ], - ""owner"": ""colama"", - ""dest_user"": ""colama"" - } - r = run_api.ideploy_change_ownership(params) - if run_api.user_type == USER_TYPE['non_admin']: - test_assert.status(r, 403) - rjson = r.json() - assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson - else: - test_assert.status(r, 400) - rjson = r.json() - assert rjson['error'] == ""The dest_user and the owner should be different"", ""|> Json %s"" % rjson" -ideploy/rest/change_ownership,"attempting to change ownership where the destination user does not exist check. Check the user type before performing the operation, only admin user type have the permission to perform such operations. + r = invalid_exec_api.profile_details(user_id=0) + res = r.json() + test_assert.status(r, 401) + assert res['detail'] == ""Invalid token."" +" +/rtask/rest/list/,listing the status of rtasks,,"{ + ""status"" : 200 +}","def test_rtask_list_status(rtask_list_status): + """""" + Listing the status of rtasks + """""" + params, r = rtask_list_status + test_assert.status(r, 200) +" +/servers/rest/backup-manifest/,"testing server backup-manifest api using empty string token. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
","{ - ""deployment_uuids"": ['invalid'], - ""owner"": ""colama"", - ""dest_user"": ""non-exiting-user"" - }","{""status"": 400, ""message"": ""Either User 'currentowner' or 'nonexistentuser' does not exist...""}","def test_ideploy_change_ownership_user_does_not_exits(run_api): +""token"" : """" +}","{ + ""status"" : 403, + ""message"" : Token Required.' +}","def test_server_backup_manifest_empty_token(run_api): """""" - One of the user does not exits + testing backup-manifest api using empty string token """""" - params = { - ""deployment_uuids"": ['invalid'], - ""owner"": ""colama"", - ""dest_user"": ""non-exiting-user"" - } - r = run_api.ideploy_change_ownership(params) - if run_api.user_type == USER_TYPE['non_admin']: - test_assert.status(r, 403) - rjson = r.json() - assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson + params = {""token"": """"} + r = run_api.server_backup_manifest(params) + rjson = r.json() + if run_api.user_type == 'admin': + test_assert.status(r, 400) + assert rjson['error'] == 'Token Required', ""|> Json %s"" % rjson else: + test_assert.status(r, 403) + assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson + + +" +/servers/rest/backup-manifest/,"testing server backup-manifest api using invalid token. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ +""token"" : ""invalid"" +}","{ + ""status"" : 401, + ""message"" : ""Invalid Token"" +}","def test_server_backup_manifest_invalid_token(run_api): + """""" + testing backup-manifest api using invalid token + """""" + params = {""token"": ""invalid""} + r = run_api.server_backup_manifest(params) + rjson = r.json() + if run_api.user_type == 'admin': test_assert.status(r, 400) - rjson = r.json() - assert rjson['error'] == ""Either User 'colama' or 'non-exiting-user' does not exist..."", ""|> Json %s"" % rjson + assert rjson['reason'] == 'Invalid Token', ""|> Json %s"" % rjson + else: + test_assert.status(r, 403) + assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson " -ideploy/rest/change_ownership,Attempting to change ownership where the owner does not exist.,"{""deployment_UUIDs"": [""UUID1"", ""UUID2""], ""owner"": ""nonexistentowner"", ""dest_user"": ""newowner""}","{""status"": 400, ""message"": ""Either User 'nonexistentowner' or 'newowner' does not exist...""}","def test_ideploy_change_ownership_for_not_existing_owner(skip_if_not_admin, run_api, custom_lib_non_admin_operations): +/servers/rest/backup-token/,"creating a backup token for the server. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+",,"{ + ""status"" : 201 +}","def test_server_backup_token(run_api): """""" - To change ownership of deployed machine if one of the user do not exit + create a backup token for the server """""" - params = { - ""deployment_uuids"": ['invalid'], - ""owner"":""non-exiting-user"", - ""dest_user"": ""manager"" - } - r = run_api.ideploy_change_ownership(params) - if run_api.user_type == USER_TYPE['non_admin']: - test_assert.status(r, 403) - rjson = r.json() - assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson + r = run_api.server_backup_token() + rjson = r.json() + if run_api.user_type == 'admin': + test_assert.status(r, 201) + assert ""token"" in rjson, rjson else: - test_assert.status(r, 400) - rjson = r.json() - assert rjson['error'] == ""Owner does not exist..."", ""|> Json %s"" % rjson + test_assert.status(r, 403) + assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson " -ideploy/rest/change_ownership,"Attempting to change ownership with an empty list of UUIDs. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +/servers/rest/fetch_sql_datadir_path/,"fetching SQL data dir path from server using invalid server id. Check the user type before performing the operation, only admin user type have the permission to perform such operations. ","{ - ""deployment_uuids"": [], - ""owner"": ""vivekt"", - ""dest_user"": ""manager"" -}","{""status"": 400, ""message"": ""please provide list of uuids""}","def test_ideploy_change_ownership_empty_list_uuid(run_api): +""server_id"" : ""invalid"" +}","{ + ""status"" : 404, + ""message"" : ""Server not found"" +}","def test_server_fetch_sql_datadir_path_invalid_server_id(run_api): """""" - change ownership with an empty list of UUIDs. 
+ testing fetch_sql_datadir_path api using invalid server id """""" - params = { - ""deployment_uuids"": [], - ""owner"": ""vivekt"", - ""dest_user"": ""manager"" - } - r = run_api.ideploy_change_ownership(params) - if run_api.user_type == USER_TYPE['non_admin']: - test_assert.status(r, 403) - rjson = r.json() - assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson + server_id = 'invalid' + r = run_api.server_fetch_sql_datadir_path(server_id) + rjson = r.json() + if run_api.user_type == 'admin': + test_assert.status(r, 404) + assert rjson['error'] == 'Server not found', ""|> Json %s"" % rjson else: - test_assert.status(r, 400) - rjson = r.json() - assert rjson['error'] == 'please provide list of uuids', ""|> %s"" % rjson" -ideploy/rest/change_ownership,"Changing ownership with invalid deployment UUIDs format. Check the user type before performing the operation, only admin user type have the permission to perform such operations. + test_assert.status(r, 403) + assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson + + +" +/servers/rest/fetch_sql_datadir_path/,"fetching SQL data dir path from server using invalid credentials. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
","{ - ""deployment_uuids"": - {}, - ""owner"": ""vivekt"", - ""dest_user"": ""manager"" - }","{""status"": 400, ""message"": ""Error message explaining invalid input format for UUIDs""}","def test_ideploy_change_ownership_with_invalid_data_type(run_api): + ""username"": ""invalid"", + ""password"": ""invalid"", + ""port"": 22 +}","{ + ""status"" : 400, + ""message"" : ""Authentication failed"" +}","def test_server_fetch_sql_datadir_path_invalid_credentials(run_api): """""" - invalid input format for changing ownership + testing fetch_sql_datadir_path api using invalid credentials """""" params = { - ""deployment_uuids"": - {}, - ""owner"": ""vivekt"", - ""dest_user"": ""manager"" + ""username"": ""invalid"", + ""password"": ""invalid"", + ""port"": 22 } - r = run_api.ideploy_change_ownership(params) - if run_api.user_type == USER_TYPE['non_admin']: - test_assert.status(r, 403) - rjson = r.json() - assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson - else: + server_id = choice(list(run_api.clm_my_servers.values())) + r = run_api.server_fetch_sql_datadir_path(server_id, params) + res = r.json() + if run_api.user_type == 'admin': test_assert.status(r, 400) - rjson = r.json() - assert rjson['error'] == ""deployment_uuids cannot be null or empty"", ""|> Json %s"" % rjson + assert 'FAILURE' in res[""result""], res + assert ""Authentication failed"" in res[""error""], res + else: + test_assert.status(r, 403) + assert res['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % res + + " -ideploy/rest/change_ownership,"Partial success in changing ownership where some UUIDs fail. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +/servers/rest/fetch_sql_datadir_path/,"fetching SQL data dir path from server using invalid port number. 
Check the user type before performing the operation, only admin user type have the permission to perform such operations. ","{ - ""deployment_uuids"": [ - ""invalid"" - ], - ""owner"": ""vivekt"", - ""dest_user"": ""manager"" - }","{""status"": 207, ""message"": ""These objects failed to change their ownership: [\""invalid_UUID\""]""}","def test_ideploy_change_ownership_invalid_id(skip_if_non_admin, run_api): + ""username"": ""string"", + ""password"": ""string"", + ""port"": 454 +}","{ + ""status"" : 400, + ""message"" : ""Unable to connect to port"" +}","def test_server_fetch_sql_datadir_path_incorrect_port(run_api): """""" - Partial success in changing ownership where some UUIDs fail. + testing fetch_sql_datadir_path api using incorrect port number """""" params = { - ""deployment_uuids"": [ - ""invalid"" - ], - ""owner"": ""vivekt"", - ""dest_user"": ""manager"" + ""username"": ""string"", + ""password"": ""string"", + ""port"": 454 } - r = run_api.ideploy_change_ownership(params) - if run_api.user_type == USER_TYPE['non_admin']: - test_assert.status(r, 403) - rjson = r.json() - assert rjson['detail'] == ""You do not have permission to perform this action."", ""|> Json %s"" % rjson - else: + server_id = choice(list(run_api.clm_my_servers.values())) + r = run_api.server_fetch_sql_datadir_path(server_id, params) + res = r.json() + if run_api.user_type == 'admin': test_assert.status(r, 400) - rjson = r.json() - assert rjson['error'] == ""The count of provided UUIDs doesn't match with the count of existing Deployments. 
Make sure that the provided UUIDs are valid, the deployment(s) is/are not a part of any Island, they belong to the 'vivekt' user and are for one category, either DeployedMachine or DeployedIsland"", ""|> Json %s"" % rjson + assert 'FAILURE' in res[""result""], res + assert ""Unable to connect to port"" in res[""error""], res + else: + test_assert.status(r, 403) + assert res['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % res + " -ideploy/rest/change_ownership,Successful change of ownership from one user to another where both users exist and the requester has the necessary permissions,"{ - ""deployment_uuids"": [ - deploy_id - ], - ""owner"": ""vivekt"", - ""dest_user"": ""manager"" - }","{""status"": 200, ""message"": ""Operation performed successfully without any error""}"," -@pytest.mark.parametrize(""custom_ilib_non_admin_operations"", PARAMETERS_SRV_RIGHT, indirect=True) -def test_ideploy_change_ownership(skip_if_non_admin, custom_ilib_non_admin_operations, run_api): +/servers/rest/fetch_sql_datadir_path/,"fetching SQL data dir path from server by setting username to NULL. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ + ""username"": None, + ""password"": ""string"", + ""port"": 22 + }","{ + ""status"" : 400, + ""message"" : ""This field may not be null"" +}","def test_server_fetch_sql_datadir_path_null_username(run_api): """""" - Successful change of ownership from one user to another + testing fetch_sql_datadir_path api using username as NULL """""" - deploy_id = custom_ilib_non_admin_operations params = { - ""deployment_uuids"": [ - deploy_id - ], - ""owner"": ""vivekt"", - ""dest_user"": ""manager"" + ""username"": None, + ""password"": ""string"", + ""port"": 22 } - r = run_api.ideploy_change_ownership(params) - test_assert.status(r, 200) - island_detail = run_api.ideploy_details(deploy_id).json() - assert island_detail['island']['owner'] == 'manager', ""|> Json %s"" % island_detail - ilib_id = island_detail['island']['deploy_for']['uuid'] - run_api.ideploy_delete(deploy_id) - run_api.ilibrary_delete(ilib_id) + server_id = choice(list(run_api.clm_my_servers.values())) + r = run_api.server_fetch_sql_datadir_path(server_id, params) + res = r.json() + if run_api.user_type == 'admin': + test_assert.status(r, 400) + assert 'FAILURE' in res[""result""], res + assert ""This field may not be null"" in res[""error""], res + else: + test_assert.status(r, 403) + assert res['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % res + " -ideploy/rest/change_ownership,chaning ownership of an invalid deployed island from non-admin by an admin user ,"{ - ""deployment_uuids"": [""invalid""], - ""owner"", - ""dest_user"", -}","{ -""status"" : 400, -""message"":""Make sure that the provided UUIDs are valid"" -}","def test_ideploy_change_ownership_invalid_uuid(skip_if_not_admin, non_admin_exec_api, run_api): +/servers/rest/fetch_sql_datadir_path/,"fetching SQL data dir path from server by using valid parameter. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+","{ + ""username"", + ""password"", + ""port"": 22 + }","{ + ""status"" : 200, +}","def test_server_fetch_sql_datadir_path(run_api): """""" - To change ownership of invalid deployed island from non-admin user to admin user by admin + testing fetch_sql_datadir_path api using valid params """""" params = { - ""deployment_uuids"": [""invalid""], - ""owner"": non_admin_exec_api.user, - ""dest_user"": run_api.user + ""username"": DEFAULT_ROOT_ACCOUNT[""user""], + ""password"": DEFAULT_ROOT_ACCOUNT[""password""], + ""port"": 22 } - res = run_api.ideploy_change_ownership(params) - rjson = res.json() - test_assert.status(res, 400) - assert ""Make sure that the provided UUIDs are valid"" in rjson[""error""], rjson + server_id = choice(list(run_api.clm_my_servers.values())) + r = run_api.server_fetch_sql_datadir_path(server_id, params) + res = r.json() + if run_api.user_type == 'admin': + test_assert.status(r, 200) + assert ""datadir_path"" in res, res + else: + test_assert.status(r, 403) + assert res['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % res " -ideploy/rest/change_ownership,chaning ownership from admin to non-admin of an deployed island machine by an admin user ,"{ - ""deployment_uuids"" - ""owner"" - ""dest_user"" +/servers/rest/fix_messed_layers/,fixing messed layers of server using invalid id,"{ +uuid : ""invalid"" }","{ -""status"" : 400, -""message"":""The provided UUIDs might belong to the DeployedMachine. 
Trigger the correct API""
-}","def test_ideploy_change_ownership_with_deployed_machine_uuid(skip_if_not_admin, deploy_image, non_admin_exec_api, run_api):
+/servers/rest/fix_messed_layers/,fixing messed layers of server using invalid id,"{
+uuid : ""invalid""
+}","{
+    ""status"" : 404,
+    ""message"" : 'Server with uuid invalid does not exist'
+}","def test_server_fix_messed_layers_invalid_id(run_api):
     """"""
-    To change ownership of deployed machine from admin user to non-admin user by admin
+    server fix messed layers invalid id
     """"""
-    template, r = deploy_image
-    deploy_id = r.json()[""uuid""]
-    params = {
-        ""deployment_uuids"": [deploy_id],
-        ""owner"": run_api.user,
-        ""dest_user"": non_admin_exec_api.user
-    }
-    res = run_api.ideploy_change_ownership(params)
-    rjson = res.json()
-    test_assert.status(res, 400)
-    assert f""The provided UUIDs ['{deploy_id}'] might belong to the DeployedMachine. Trigger the correct API"" in rjson[""error""], rjson
+    r = run_api.server_fix_messed_layers('invalid')
+    if run_api.user_type == 'non-admin':
+        test_assert.status(r, 403)
+        rjson = r.json()
+        assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson
+    else:
+        test_assert.status(r, 404)
+        rjson = r.json()
+        assert rjson['error'] == 'Server with uuid invalid does not exist', ""|> Json %s"" % rjson
 "
-ideploy/rest/deploy_filter_fields/,successful filtering of the fields of deployed island machine ,,"{
-    ""status"":200,
-    ""response"":list of filters
-}","def test_ideploy_deploy_filter_fields(run_api):
+/servers/rest/messed_layers_uuids/,fixing messed layers uuid of server using invalid id,"{
+uuid : ""invalid""
+}","{
+    ""status"" : 404,
+    ""message"" : 'Server with uuid invalid does not exist'
+}","def test_server_messed_layers_uuids_invalid_uuid(run_api):
     """"""
-    ideploy deploy filter fields
+    server_messed_layers_uuids invalid uuid
     """"""
-    r = run_api.ideploy_filter_fields()
-    test_assert.status(r, 200)
+    uuid = 'invalid'
+    r = run_api.server_messed_layers_uuids(uuid)
+    if run_api.user_type == 'admin':
+        test_assert.status(r, 404)
+        rjson = r.json()
+        assert rjson['error'] == 'Server with uuid invalid does not exist', ""|> Json %s"" % rjson
+    else:
+        test_assert.status(r, 403)
+        rjson = r.json()
+        assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson
 "
-license/rest/licenses_check, when day params is equal to duration of license,,200: licence status,"def test_license_check_when_day_is_equal_to_duration(admin_exec_api, run_api):
+/servers/rest/messed_layers_uuids_response/,fixing messed layers uuid response of server using invalid id,"{
+uuid : ""invalid""
+}","{
+    ""status"" : 404,
+    ""message"" : 'Server with uuid invalid does not exist'
+}","def test_server_messed_layers_uuids_response_invalid_uuid(run_api):
     """"""
-    license check day is equal to duration
+    server_messed_layers_uuids_response invalid uuid
     """"""
-    res = admin_exec_api.license_list()
-    license_list = res.json()
-    active_license_list = [licenses for licenses in license_list['results'] if licenses['state'] == 'active']
-    durations = [json.loads(lic['data'])[""duration""] for lic in active_license_list]
-    duration = max(durations)
-    total_duration = duration + math.ceil(5 * duration / 100)
-    r = run_api.license_check(days=total_duration)
+    uuid = 'invalid'
+    r = run_api.server_messed_layers_uuids_response(uuid)
+    test_assert.status(r, 404)
     rjson = r.json()
-    test_assert.status(r, 200)
-    assert rjson['msg'] == ""Some License(s) are expiring soon"", ""The error %s"" % rjson
+    assert rjson['error'] == 'Server with uuid invalid does not exist', ""|> Json %s"" % rjson
 "
-license/rest/licenses_check, when day params is negative,,"200: {
-    ""result"": ""FAILURE"",
-    ""error"": ""Value of `days` cannot be negative""
-}","def test_license_check_when_day_is_negative(run_api):
+/servers/rest/prepare-for-backup/,"preparing server backup without token. Check the user type before performing the operation, only admin user type have the permission to perform such operations. 
+",,"{ + ""status"" : 400, + ""message"" : 'Token required' +}","def test_server_prepare_for_backup_without_token(run_api): """""" - license check when day is negative + server_prepare_for_backup without token """""" - r = run_api.license_check(days=-1) + r = run_api.server_prepare_for_backup() rjson = r.json() - test_assert.status(r, 400) - assert rjson['error'] == ""Value of `days` cannot be negative"", ""The error %s"" % rjson + if run_api.user_type == 'admin': + test_assert.status(r, 400) + assert rjson['error'] == 'Token Required', ""|> Json %s"" % rjson + else: + test_assert.status(r, 403) + assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson + + " -license/rest/licenses_check, when day params is zero,,200: licence status,"def test_license_check_when_day_is_zero(run_api): +/servers/rest/prepare-for-backup/,"preparing server backup with invalid token. Check the user type before performing the operation, only admin user type have the permission to perform such operations. +",,"{ + ""status"" : 400, + ""message"" : 'Invalid backup token' +}","def test_server_prepare_for_backup_with_invalid_token(run_api): """""" - license check when day is 0 + server prepare for backup with invalid token """""" - r = run_api.license_check() + # create backup token incase if isn't already there + r = run_api.server_backup_token() + params = { + 'token': 'invalid' + } + r = run_api.server_prepare_for_backup(params) rjson = r.json() - test_assert.status(r, 200) - assert rjson['warn'] is False, ""The error %s"" % rjson - assert rjson['msg'] == ""All good!"", ""The error %s"" % rjson + if run_api.user_type == 'admin': + test_assert.status(r, 400) + assert rjson['error'] == 'Invalid backup token.', ""|> Json %s"" % rjson + else: + test_assert.status(r, 403) + assert rjson['detail'] == 'You do not have permission to perform this action.', ""|> Json %s"" % rjson "