diff --git a/src/migrate/HISTORY.rst b/src/migrate/HISTORY.rst index c2676e0b715..2def66096b2 100644 --- a/src/migrate/HISTORY.rst +++ b/src/migrate/HISTORY.rst @@ -11,6 +11,10 @@ Release History +++++++++++++++ * New version. -2.0.1b1 +2.0.0b1 +++++++++++++++ -* Switch to experimental version. \ No newline at end of file +* Switch to experimental version. + +3.0.0b1 ++++++++++++++++ +* Refactor codebase for improved readability and maintainability. diff --git a/src/migrate/azext_migrate/_help.py b/src/migrate/azext_migrate/_help.py index 100dc4d7535..070d2485701 100644 --- a/src/migrate/azext_migrate/_help.py +++ b/src/migrate/azext_migrate/_help.py @@ -71,31 +71,31 @@ text: | az migrate local get-discovered-server \\ --project-name myMigrateProject \\ - --resource-group-name myRG + --resource-group myRG - name: Get a specific discovered server by name text: | az migrate local get-discovered-server \\ --project-name myMigrateProject \\ - --resource-group-name myRG \\ + --resource-group myRG \\ --name machine-12345 - name: Filter discovered servers by display name text: | az migrate local get-discovered-server \\ --project-name myMigrateProject \\ - --resource-group-name myRG \\ + --resource-group myRG \\ --display-name "web-server" - name: List VMware servers discovered by a specific appliance text: | az migrate local get-discovered-server \\ --project-name myMigrateProject \\ - --resource-group-name myRG \\ + --resource-group myRG \\ --appliance-name myVMwareAppliance \\ --source-machine-type VMware - name: Get a specific server from a specific appliance text: | az migrate local get-discovered-server \\ --project-name myMigrateProject \\ - --resource-group-name myRG \\ + --resource-group myRG \\ --appliance-name myAppliance \\ --name machine-12345 \\ --source-machine-type HyperV @@ -152,14 +152,14 @@ - name: Initialize replication infrastructure text: | az migrate local replication init \\ - --resource-group-name myRG \\ + --resource-group myRG \\ --project-name myMigrateProject \\ --source-appliance-name myVMwareAppliance \\ --target-appliance-name myAzStackHCIAppliance - name: Initialize and return success status text: | az migrate local replication init \\ - --resource-group-name myRG \\ + --resource-group myRG \\ --project-name myMigrateProject \\ --source-appliance-name mySourceAppliance \\ --target-appliance-name myTargetAppliance \\ @@ -268,7 +268,7 @@ az migrate local replication new \\ --machine-index 1 \\ --project-name myMigrateProject \\ - --resource-group-name myRG \\ + --resource-group myRG \\ --target-storage-path-id "XZXZ" \\ --target-resource-group-id "YZYZ" \\ --target-vm-name migratedVM01 \\ @@ -303,3 +303,116 @@ --target-test-virtual-switch-id "XYXY" \\ --os-disk-id "disk-0" """ + +helps['migrate local replication remove'] = """ + type: command + short-summary: Stop replication for a migrated server. + long-summary: | + Stops the replication for a migrated server and removes + the replication configuration. + This command disables protection for the specified server. + + Note: This command uses a preview API version + and may experience breaking changes in future releases. + parameters: + - name: --target-object-id --id + short-summary: Replicating server ARM ID to disable replication. + long-summary: > + Specifies the ARM resource ID of the replicating server + for which replication needs to be disabled. + The ID should be retrieved using a get or list command + for replication items. + - name: --force-remove --force + short-summary: Force remove the replication. 
+ long-summary: > + Specifies whether the replication needs to be + force removed. Default is false. + Use this option to remove replication even if + the cleanup process encounters errors. + - name: --subscription-id + short-summary: Azure subscription ID. + long-summary: > + The subscription containing the replication resources. + Uses the current subscription if not specified. + examples: + - name: Stop replication for a migrated server + text: | + az migrate local replication remove \\ + --target-object-id "XXXX" + - name: Force remove replication for a server + text: | + az migrate local replication remove \\ + --target-object-id "XXXX" \\ + --force-remove true + - name: Stop replication using short parameter names + text: | + az migrate local replication remove \\ + --id "XXXX" \\ + --force +""" + +helps['migrate local replication get-job'] = """ + type: command + short-summary: Retrieve the status of an Azure Migrate job. + long-summary: | + Get the status and details of an Azure Migrate replication job. + You can retrieve a specific job by its ARM ID or name, + or list all jobs in a migrate project. + + Note: This command uses a preview API version + and may experience breaking changes in future releases. + parameters: + - name: --job-id --id + short-summary: Job ARM ID for which details need to be retrieved. + long-summary: > + Specifies the full ARM resource ID of the job. + When provided, retrieves the specific job details. + - name: --resource-group -g + short-summary: Resource group name where the vault is present. + long-summary: > + The name of the resource group containing + the recovery services vault. + Required when using --project-name. + - name: --project-name + short-summary: Name of the migrate project. + long-summary: > + The name of the Azure Migrate project. + Required when using --resource-group. + - name: --job-name --name + short-summary: Job identifier/name. + long-summary: > + The name of the specific job to retrieve. + If not provided, lists all jobs in the project. + - name: --subscription-id + short-summary: Azure subscription ID. + long-summary: > + The subscription containing the migrate project. + Uses the current subscription if not specified. 
+ examples: + - name: Get a specific job by ARM ID + text: | + az migrate local replication get-job \\ + --job-id "/subscriptions/{sub-id}/resourceGroups/{rg}/providers/Microsoft.DataReplication/replicationVaults/{vault}/jobs/{job-name}" + - name: Get a specific job by name + text: | + az migrate local replication get-job \\ + --resource-group myRG \\ + --project-name myMigrateProject \\ + --job-name myJobName + - name: List all jobs in a project + text: | + az migrate local replication get-job \\ + --resource-group myRG \\ + --project-name myMigrateProject + - name: Get job using short parameter names + text: | + az migrate local replication get-job \\ + --id "/subscriptions/{sub-id}/resourceGroups/{rg}/providers/Microsoft.DataReplication/replicationVaults/{vault}/jobs/{job-name}" + - name: Get job with specific subscription + text: | + az migrate local replication get-job \\ + -g myRG \\ + --project-name myMigrateProject \\ + --name myJobName \\ + --subscription-id "12345678-1234-1234-1234-123456789012" +""" diff --git a/src/migrate/azext_migrate/_initialize_replication_infrastructure_helpers.py b/src/migrate/azext_migrate/_initialize_replication_infrastructure_helpers.py deleted file mode 100644 index 68055b3265f..00000000000 --- a/src/migrate/azext_migrate/_initialize_replication_infrastructure_helpers.py +++ /dev/null @@ -1,1556 +0,0 @@ -# -------------------------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# -------------------------------------------------------------------------------------------- - -import time -from knack.util import CLIError -from knack.log import get_logger -from azext_migrate._helpers import ( - send_get_request, - get_resource_by_id, - delete_resource, - create_or_update_resource, - generate_hash_for_artifact, - APIVersion, - ProvisioningState, - AzLocalInstanceTypes, - FabricInstanceTypes, - ReplicationPolicyDetails, - RoleDefinitionIds, - StorageAccountProvisioningState -) -import json - - -def validate_required_parameters(resource_group_name, - project_name, - source_appliance_name, - target_appliance_name): - # Validate required parameters - if not resource_group_name: - raise CLIError("resource_group_name is required.") - if not project_name: - raise CLIError("project_name is required.") - if not source_appliance_name: - raise CLIError("source_appliance_name is required.") - if not target_appliance_name: - raise CLIError("target_appliance_name is required.") - - -def get_and_validate_resource_group(cmd, subscription_id, - resource_group_name): - """Get and validate that the resource group exists.""" - rg_uri = (f"/subscriptions/{subscription_id}/" - f"resourceGroups/{resource_group_name}") - resource_group = get_resource_by_id( - cmd, rg_uri, APIVersion.Microsoft_Resources.value) - if not resource_group: - raise CLIError( - f"Resource group '{resource_group_name}' does not exist " - f"in the subscription.") - print(f"Selected Resource Group: '{resource_group_name}'") - return rg_uri - - -def get_migrate_project(cmd, project_uri, project_name): - """Get and validate migrate project.""" - migrate_project = get_resource_by_id( - cmd, project_uri, APIVersion.Microsoft_Migrate.value) - if not migrate_project: - raise CLIError(f"Migrate project '{project_name}' not found.") - - if (migrate_project.get('properties', {}).get('provisioningState') != - 
ProvisioningState.Succeeded.value): - raise CLIError( - f"Migrate project '{project_name}' is not in a valid state.") - - return migrate_project - - -def get_data_replication_solution(cmd, project_uri): - """Get Data Replication Service Solution.""" - amh_solution_name = ( - "Servers-Migration-ServerMigration_DataReplication") - amh_solution_uri = f"{project_uri}/solutions/{amh_solution_name}" - amh_solution = get_resource_by_id( - cmd, amh_solution_uri, APIVersion.Microsoft_Migrate.value) - if not amh_solution: - raise CLIError( - f"No Data Replication Service Solution " - f"'{amh_solution_name}' found.") - return amh_solution - - -def get_discovery_solution(cmd, project_uri): - """Get Discovery Solution.""" - discovery_solution_name = "Servers-Discovery-ServerDiscovery" - discovery_solution_uri = ( - f"{project_uri}/solutions/{discovery_solution_name}") - discovery_solution = get_resource_by_id( - cmd, discovery_solution_uri, APIVersion.Microsoft_Migrate.value) - if not discovery_solution: - raise CLIError( - f"Server Discovery Solution '{discovery_solution_name}' " - f"not found.") - return discovery_solution - - -def get_and_setup_replication_vault(cmd, amh_solution, rg_uri): - """Get and setup replication vault with managed identity.""" - # Validate Replication Vault - vault_id = (amh_solution.get('properties', {}) - .get('details', {}) - .get('extendedDetails', {}) - .get('vaultId')) - if not vault_id: - raise CLIError( - "No Replication Vault found. Please verify your " - "Azure Migrate project setup.") - - replication_vault_name = vault_id.split("/")[8] - vault_uri = ( - f"{rg_uri}/providers/Microsoft.DataReplication/" - f"replicationVaults/{replication_vault_name}") - replication_vault = get_resource_by_id( - cmd, vault_uri, APIVersion.Microsoft_DataReplication.value) - if not replication_vault: - raise CLIError( - f"No Replication Vault '{replication_vault_name}' found.") - - # Check if vault has managed identity, if not, enable it - vault_identity = ( - replication_vault.get('identity') or - replication_vault.get('properties', {}).get('identity') - ) - if not vault_identity or not vault_identity.get('principalId'): - print( - f"Replication vault '{replication_vault_name}' does not " - f"have a managed identity. " - "Enabling system-assigned identity..." - ) - - # Update vault to enable system-assigned managed identity - vault_update_body = { - "identity": { - "type": "SystemAssigned" - } - } - - replication_vault = create_or_update_resource( - cmd, vault_uri, APIVersion.Microsoft_DataReplication.value, - vault_update_body - ) - - # Wait for identity to be created - time.sleep(30) - - # Refresh vault to get the identity - replication_vault = get_resource_by_id( - cmd, vault_uri, APIVersion.Microsoft_DataReplication.value) - vault_identity = ( - replication_vault.get('identity') or - replication_vault.get('properties', {}).get('identity') - ) - - if not vault_identity or not vault_identity.get('principalId'): - raise CLIError( - f"Failed to enable managed identity for replication " - f"vault '{replication_vault_name}'") - - print( - f"✓ Enabled system-assigned managed identity. " - f"Principal ID: {vault_identity.get('principalId')}" - ) - else: - print( - f"✓ Replication vault has managed identity. 
" - f"Principal ID: {vault_identity.get('principalId')}") - - return replication_vault, replication_vault_name - - -def _store_appliance_site_mapping(app_map, appliance_name, site_id): - """Store appliance name to site ID mapping in both lowercase and - original case.""" - app_map[appliance_name.lower()] = site_id - app_map[appliance_name] = site_id - - -def _process_v3_dict_map(app_map, app_map_v3): - """Process V3 appliance map in dict format.""" - for appliance_name_key, site_info in app_map_v3.items(): - if isinstance(site_info, dict) and 'SiteId' in site_info: - _store_appliance_site_mapping( - app_map, appliance_name_key, site_info['SiteId']) - elif isinstance(site_info, str): - _store_appliance_site_mapping( - app_map, appliance_name_key, site_info) - - -def _process_v3_list_item(app_map, item): - """Process a single item from V3 appliance list.""" - if not isinstance(item, dict): - return - - # Check if it has ApplianceName/SiteId structure - if 'ApplianceName' in item and 'SiteId' in item: - _store_appliance_site_mapping( - app_map, item['ApplianceName'], item['SiteId']) - return - - # Or it might be a single key-value pair - for key, value in item.items(): - if isinstance(value, dict) and 'SiteId' in value: - _store_appliance_site_mapping( - app_map, key, value['SiteId']) - elif isinstance(value, str): - _store_appliance_site_mapping(app_map, key, value) - - -def _process_v3_appliance_map(app_map, app_map_v3): - """Process V3 appliance map data structure.""" - if isinstance(app_map_v3, dict): - _process_v3_dict_map(app_map, app_map_v3) - elif isinstance(app_map_v3, list): - for item in app_map_v3: - _process_v3_list_item(app_map, item) - - -def parse_appliance_mappings(discovery_solution): - """Parse appliance name to site ID mappings from discovery solution.""" - app_map = {} - extended_details = (discovery_solution.get('properties', {}) - .get('details', {}) - .get('extendedDetails', {})) - - # Process applianceNameToSiteIdMapV2 - if 'applianceNameToSiteIdMapV2' in extended_details: - try: - app_map_v2 = json.loads( - extended_details['applianceNameToSiteIdMapV2']) - if isinstance(app_map_v2, list): - for item in app_map_v2: - if (isinstance(item, dict) and - 'ApplianceName' in item and - 'SiteId' in item): - # Store both lowercase and original case - app_map[item['ApplianceName'].lower()] = ( - item['SiteId']) - app_map[item['ApplianceName']] = item['SiteId'] - except (json.JSONDecodeError, KeyError, TypeError) as e: - get_logger(__name__).warning( - "Failed to parse applianceNameToSiteIdMapV2: %s", str(e)) - - # Process applianceNameToSiteIdMapV3 - if 'applianceNameToSiteIdMapV3' in extended_details: - try: - app_map_v3 = json.loads( - extended_details['applianceNameToSiteIdMapV3']) - _process_v3_appliance_map(app_map, app_map_v3) - except (json.JSONDecodeError, KeyError, TypeError) as e: - get_logger(__name__).warning( - "Failed to parse applianceNameToSiteIdMapV3: %s", str(e)) - - if not app_map: - raise CLIError( - "Server Discovery Solution missing Appliance Details. 
" - "Invalid Solution.") - - return app_map - - -def validate_and_get_site_ids(app_map, source_appliance_name, - target_appliance_name): - """Validate appliance names and get their site IDs.""" - # Validate SourceApplianceName & TargetApplianceName - try both - # original and lowercase - source_site_id = (app_map.get(source_appliance_name) or - app_map.get(source_appliance_name.lower())) - target_site_id = (app_map.get(target_appliance_name) or - app_map.get(target_appliance_name.lower())) - - if not source_site_id: - # Provide helpful error message with available appliances - # (filter out duplicates) - available_appliances = list(set(k for k in app_map - if k not in app_map or - not k.islower())) - if not available_appliances: - # If all keys are lowercase, show them - available_appliances = list(set(app_map.keys())) - raise CLIError( - f"Source appliance '{source_appliance_name}' not in " - f"discovery solution. " - f"Available appliances: {','.join(available_appliances)}" - ) - if not target_site_id: - # Provide helpful error message with available appliances - # (filter out duplicates) - available_appliances = list(set(k for k in app_map - if k not in app_map or - not k.islower())) - if not available_appliances: - # If all keys are lowercase, show them - available_appliances = list(set(app_map.keys())) - raise CLIError( - f"Target appliance '{target_appliance_name}' not in " - f"discovery solution. " - f"Available appliances: {','.join(available_appliances)}" - ) - - return source_site_id, target_site_id - - -def determine_instance_types(source_site_id, target_site_id, - source_appliance_name, - target_appliance_name): - """Determine instance types based on site IDs.""" - hyperv_site_pattern = "/Microsoft.OffAzure/HyperVSites/" - vmware_site_pattern = "/Microsoft.OffAzure/VMwareSites/" - - if (hyperv_site_pattern in source_site_id and - hyperv_site_pattern in target_site_id): - instance_type = AzLocalInstanceTypes.HyperVToAzLocal.value - fabric_instance_type = FabricInstanceTypes.HyperVInstance.value - elif (vmware_site_pattern in source_site_id and - hyperv_site_pattern in target_site_id): - instance_type = AzLocalInstanceTypes.VMwareToAzLocal.value - fabric_instance_type = FabricInstanceTypes.VMwareInstance.value - else: - src_type = ( - 'VMware' if vmware_site_pattern in source_site_id - else 'HyperV' if hyperv_site_pattern in source_site_id - else 'Unknown' - ) - tgt_type = ( - 'VMware' if vmware_site_pattern in target_site_id - else 'HyperV' if hyperv_site_pattern in target_site_id - else 'Unknown' - ) - raise CLIError( - f"Error matching source '{source_appliance_name}' and target " - f"'{target_appliance_name}' appliances. 
Source is {src_type}, " - f"Target is {tgt_type}" - ) - - return instance_type, fabric_instance_type - - -def find_fabric(all_fabrics, appliance_name, fabric_instance_type, - amh_solution, is_source=True): - """Find and validate a fabric for the given appliance.""" - logger = get_logger(__name__) - fabric = None - fabric_candidates = [] - - for candidate in all_fabrics: - props = candidate.get('properties', {}) - custom_props = props.get('customProperties', {}) - fabric_name = candidate.get('name', '') - - # Check if this fabric matches our criteria - is_succeeded = (props.get('provisioningState') == - ProvisioningState.Succeeded.value) - - # Check solution ID match - handle case differences and trailing - # slashes - fabric_solution_id = (custom_props.get('migrationSolutionId', '') - .rstrip('/')) - expected_solution_id = amh_solution.get('id', '').rstrip('/') - is_correct_solution = (fabric_solution_id.lower() == - expected_solution_id.lower()) - - is_correct_instance = (custom_props.get('instanceType') == - fabric_instance_type) - - # Check if fabric name contains appliance name or vice versa - name_matches = ( - fabric_name.lower().startswith(appliance_name.lower()) or - appliance_name.lower() in fabric_name.lower() or - fabric_name.lower() in appliance_name.lower() or - f"{appliance_name.lower()}-" in fabric_name.lower() - ) - - # Collect potential candidates even if they don't fully match - if custom_props.get('instanceType') == fabric_instance_type: - fabric_candidates.append({ - 'name': fabric_name, - 'state': props.get('provisioningState'), - 'solution_match': is_correct_solution, - 'name_match': name_matches - }) - - if is_succeeded and is_correct_instance and name_matches: - # If solution doesn't match, log warning but still consider it - if not is_correct_solution: - logger.warning( - "Fabric '%s' matches name and type but has " - "different solution ID", fabric_name) - fabric = candidate - break - - if not fabric: - appliance_type_label = "source" if is_source else "target" - error_msg = ( - f"Couldn't find connected {appliance_type_label} appliance " - f"'{appliance_name}'.\n") - - if fabric_candidates: - error_msg += ( - f"Found {len(fabric_candidates)} fabric(s) with " - f"matching type '{fabric_instance_type}': \n") - for candidate in fabric_candidates: - error_msg += ( - f" - {candidate['name']} " - f"(state: {candidate['state']}, " - f"solution_match: {candidate['solution_match']}, " - f"name_match: {candidate['name_match']})\n") - error_msg += "\nPlease verify:\n" - error_msg += "1. The appliance name matches exactly\n" - error_msg += "2. The fabric is in 'Succeeded' state\n" - error_msg += ( - "3. The fabric belongs to the correct migration solution") - else: - error_msg += ( - f"No fabrics found with instance type " - f"'{fabric_instance_type}'.\n") - error_msg += "\nThis usually means:\n" - error_msg += ( - f"1. The {appliance_type_label} appliance " - f"'{appliance_name}' is not properly configured\n") - if (fabric_instance_type == - FabricInstanceTypes.VMwareInstance.value): - appliance_type = 'VMware' - elif (fabric_instance_type == - FabricInstanceTypes.HyperVInstance.value): - appliance_type = 'HyperV' - else: - appliance_type = 'Azure Local' - error_msg += ( - f"2. The appliance type doesn't match " - f"(expecting {appliance_type})\n") - error_msg += ( - "3. 
The fabric creation is still in progress - " - "wait a few minutes and retry") - - if all_fabrics: - error_msg += "\n\nAvailable fabrics in resource group:\n" - for fab in all_fabrics: - props = fab.get('properties', {}) - custom_props = props.get('customProperties', {}) - error_msg += ( - f" - {fab.get('name')} " - f"(type: {custom_props.get('instanceType')})\n") - - raise CLIError(error_msg) - - return fabric - - -def get_fabric_agent(cmd, replication_fabrics_uri, fabric, appliance_name, - fabric_instance_type): - """Get and validate fabric agent (DRA) for the given fabric.""" - fabric_name = fabric.get('name') - dras_uri = ( - f"{replication_fabrics_uri}/{fabric_name}" - f"/fabricAgents?api-version=" - f"{APIVersion.Microsoft_DataReplication.value}" - ) - dras_response = send_get_request(cmd, dras_uri) - dras = dras_response.json().get('value', []) - - dra = None - for candidate in dras: - props = candidate.get('properties', {}) - custom_props = props.get('customProperties', {}) - if (props.get('machineName') == appliance_name and - custom_props.get('instanceType') == fabric_instance_type and - bool(props.get('isResponsive'))): - dra = candidate - break - - if not dra: - raise CLIError( - f"The appliance '{appliance_name}' is in a disconnected state." - ) - - return dra - - -def setup_replication_policy(cmd, - rg_uri, - replication_vault_name, - instance_type): - """Setup or validate replication policy.""" - policy_name = f"{replication_vault_name}{instance_type}policy" - policy_uri = ( - f"{rg_uri}/providers/Microsoft.DataReplication/replicationVaults" - f"/{replication_vault_name}/replicationPolicies/{policy_name}" - ) - - # Try to get existing policy, handle not found gracefully - try: - policy = get_resource_by_id( - cmd, policy_uri, APIVersion.Microsoft_DataReplication.value - ) - except CLIError as e: - error_str = str(e) - if ("ResourceNotFound" in error_str or "404" in error_str or - "Not Found" in error_str): - # Policy doesn't exist, this is expected for new setups - print(f"Policy '{policy_name}' does not exist, will create it.") - policy = None - else: - # Some other error occurred, re-raise it - raise - - # Handle existing policy states - if policy: - provisioning_state = ( - policy - .get('properties', {}) - .get('provisioningState') - ) - - # Wait for creating/updating to complete - if provisioning_state in [ProvisioningState.Creating.value, - ProvisioningState.Updating.value]: - print( - f"Policy '{policy_name}' found in Provisioning State " - f"'{provisioning_state}'." - ) - for i in range(20): - time.sleep(30) - policy = get_resource_by_id( - cmd, policy_uri, - APIVersion.Microsoft_DataReplication.value - ) - if policy: - provisioning_state = ( - policy.get('properties', {}).get('provisioningState') - ) - if provisioning_state not in [ - ProvisioningState.Creating.value, - ProvisioningState.Updating.value]: - break - - # Remove policy if in bad state - if provisioning_state in [ProvisioningState.Canceled.value, - ProvisioningState.Failed.value]: - print( - f"Policy '{policy_name}' found in unusable state " - f"'{provisioning_state}'. Removing..." 
- ) - delete_resource( - cmd, policy_uri, APIVersion.Microsoft_DataReplication.value - ) - time.sleep(30) - policy = None - - # Create policy if needed - if not policy or ( - policy and - policy.get('properties', {}).get('provisioningState') == - ProvisioningState.Deleted.value): - print(f"Creating Policy '{policy_name}'...") - - recoveryPoint = ( - ReplicationPolicyDetails.RecoveryPointHistoryInMinutes - ) - crashConsistentFreq = ( - ReplicationPolicyDetails.CrashConsistentFrequencyInMinutes - ) - appConsistentFreq = ( - ReplicationPolicyDetails.AppConsistentFrequencyInMinutes - ) - - policy_body = { - "properties": { - "customProperties": { - "instanceType": instance_type, - "recoveryPointHistoryInMinutes": recoveryPoint, - "crashConsistentFrequencyInMinutes": crashConsistentFreq, - "appConsistentFrequencyInMinutes": appConsistentFreq - } - } - } - - create_or_update_resource( - cmd, - policy_uri, - APIVersion.Microsoft_DataReplication.value, - policy_body, - ) - - # Wait for policy creation - for i in range(20): - time.sleep(30) - try: - policy = get_resource_by_id( - cmd, policy_uri, - APIVersion.Microsoft_DataReplication.value - ) - except Exception as poll_error: - # During creation, it might still return 404 initially - if ("ResourceNotFound" in str(poll_error) or - "404" in str(poll_error)): - print(f"Policy creation in progress... ({i + 1}/20)") - continue - raise - - if policy: - provisioning_state = ( - policy.get('properties', {}).get('provisioningState') - ) - print(f"Policy state: {provisioning_state}") - if provisioning_state in [ - ProvisioningState.Succeeded.value, - ProvisioningState.Failed.value, - ProvisioningState.Canceled.value, - ProvisioningState.Deleted.value]: - break - - if not policy or ( - policy.get('properties', {}).get('provisioningState') != - ProvisioningState.Succeeded.value): - raise CLIError(f"Policy '{policy_name}' is not in Succeeded state.") - - return policy - - -def setup_cache_storage_account(cmd, rg_uri, amh_solution, - cache_storage_account_id, - source_site_id, source_appliance_name, - migrate_project, project_name): - """Setup or validate cache storage account.""" - logger = get_logger(__name__) - - amh_stored_storage_account_id = ( - amh_solution.get('properties', {}) - .get('details', {}) - .get('extendedDetails', {}) - .get('replicationStorageAccountId') - ) - cache_storage_account = None - - if amh_stored_storage_account_id: - # Check existing storage account - storage_account_name = amh_stored_storage_account_id.split("/")[8] - storage_uri = ( - f"{rg_uri}/providers/Microsoft.Storage/storageAccounts" - f"/{storage_account_name}" - ) - storage_account = get_resource_by_id( - cmd, storage_uri, APIVersion.Microsoft_Storage.value - ) - - if storage_account and ( - storage_account - .get('properties', {}) - .get('provisioningState') == - StorageAccountProvisioningState.Succeeded.value - ): - cache_storage_account = storage_account - if (cache_storage_account_id and - cache_storage_account['id'] != - cache_storage_account_id): - warning_msg = ( - f"A Cache Storage Account '{storage_account_name}' is " - f"already linked. " - ) - warning_msg += "Ignoring provided -cache_storage_account_id." 
- logger.warning(warning_msg) - - # Use user-provided storage account if no existing one - if not cache_storage_account and cache_storage_account_id: - storage_account_name = cache_storage_account_id.split("/")[8].lower() - storage_uri = ( - f"{rg_uri}/providers/Microsoft.Storage/storageAccounts/" - f"{storage_account_name}" - ) - user_storage_account = get_resource_by_id( - cmd, storage_uri, APIVersion.Microsoft_Storage.value - ) - - if user_storage_account and ( - user_storage_account - .get('properties', {}) - .get('provisioningState') == - StorageAccountProvisioningState.Succeeded.value - ): - cache_storage_account = user_storage_account - else: - error_msg = ( - f"Cache Storage Account with Id " - f"'{cache_storage_account_id}' not found " - ) - error_msg += "or not in valid state." - raise CLIError(error_msg) - - # Create new storage account if needed - if not cache_storage_account: - artifact = f"{source_site_id}/{source_appliance_name}" - suffix_hash = generate_hash_for_artifact(artifact) - if len(suffix_hash) > 14: - suffix_hash = suffix_hash[:14] - storage_account_name = f"migratersa{suffix_hash}" - - print(f"Creating Cache Storage Account '{storage_account_name}'...") - - storage_body = { - "location": migrate_project.get('location'), - "tags": {"Migrate Project": project_name}, - "sku": {"name": "Standard_LRS"}, - "kind": "StorageV2", - "properties": { - "allowBlobPublicAccess": False, - "allowCrossTenantReplication": True, - "minimumTlsVersion": "TLS1_2", - "networkAcls": { - "defaultAction": "Allow" - }, - "encryption": { - "services": { - "blob": {"enabled": True}, - "file": {"enabled": True} - }, - "keySource": "Microsoft.Storage" - }, - "accessTier": "Hot" - } - } - - storage_uri = ( - f"{rg_uri}/providers/Microsoft.Storage/storageAccounts" - f"/{storage_account_name}" - ) - cache_storage_account = create_or_update_resource( - cmd, - storage_uri, - APIVersion.Microsoft_Storage.value, - storage_body - ) - - for _ in range(20): - time.sleep(30) - cache_storage_account = get_resource_by_id( - cmd, - storage_uri, - APIVersion.Microsoft_Storage.value - ) - if cache_storage_account and ( - cache_storage_account - .get('properties', {}) - .get('provisioningState') == - StorageAccountProvisioningState.Succeeded.value - ): - break - - if not cache_storage_account or ( - cache_storage_account - .get('properties', {}) - .get('provisioningState') != - StorageAccountProvisioningState.Succeeded.value - ): - raise CLIError("Failed to setup Cache Storage Account.") - - return cache_storage_account - - -def verify_storage_account_network_settings(cmd, - rg_uri, - cache_storage_account): - """Verify and update storage account network settings if needed.""" - storage_account_id = cache_storage_account['id'] - - # Verify storage account network settings - print("Verifying storage account network configuration...") - network_acls = ( - cache_storage_account.get('properties', {}).get('networkAcls', {}) - ) - default_action = network_acls.get('defaultAction', 'Allow') - - if default_action != 'Allow': - print( - f"WARNING: Storage account network defaultAction is " - f"'{default_action}'. " - "This may cause permission issues." - ) - print( - "Updating storage account to allow public network access..." 
- ) - - # Update storage account to allow public access - storage_account_name = storage_account_id.split("/")[-1] - storage_uri = ( - f"{rg_uri}/providers/Microsoft.Storage/storageAccounts/" - f"{storage_account_name}" - ) - - update_body = { - "properties": { - "networkAcls": { - "defaultAction": "Allow" - } - } - } - - create_or_update_resource( - cmd, storage_uri, APIVersion.Microsoft_Storage.value, - update_body - ) - - # Wait for network update to propagate - time.sleep(30) - - -def get_all_fabrics(cmd, rg_uri, resource_group_name, - source_appliance_name, - target_appliance_name, project_name): - """Get all replication fabrics in the resource group.""" - replication_fabrics_uri = ( - f"{rg_uri}/providers/Microsoft.DataReplication/replicationFabrics" - ) - fabrics_uri = ( - f"{replication_fabrics_uri}?api-version=" - f"{APIVersion.Microsoft_DataReplication.value}" - ) - fabrics_response = send_get_request(cmd, fabrics_uri) - all_fabrics = fabrics_response.json().get('value', []) - - # If no fabrics exist at all, provide helpful message - if not all_fabrics: - raise CLIError( - f"No replication fabrics found in resource group " - f"'{resource_group_name}'. " - f"Please ensure that: \n" - f"1. The source appliance '{source_appliance_name}' is deployed " - f"and connected\n" - f"2. The target appliance '{target_appliance_name}' is deployed " - f"and connected\n" - f"3. Both appliances are registered with the Azure Migrate " - f"project '{project_name}'" - ) - - return all_fabrics, replication_fabrics_uri - - -def _get_role_name(role_def_id): - """Get role name from role definition ID.""" - return ("Contributor" if role_def_id == RoleDefinitionIds.ContributorId - else "Storage Blob Data Contributor") - - -def _assign_role_to_principal(auth_client, storage_account_id, - subscription_id, - principal_id, role_def_id, - principal_type_name): - """Assign a role to a principal if not already assigned.""" - from uuid import uuid4 - from azure.mgmt.authorization.models import ( - RoleAssignmentCreateParameters, PrincipalType - ) - - role_name = _get_role_name(role_def_id) - - # Check if assignment exists - assignments = auth_client.role_assignments.list_for_scope( - scope=storage_account_id, - filter=f"principalId eq '{principal_id}'" - ) - - roles = [a.role_definition_id.endswith(role_def_id) for a in assignments] - has_role = any(roles) - - if not has_role: - role_assignment_params = RoleAssignmentCreateParameters( - role_definition_id=( - f"/subscriptions/{subscription_id}/providers" - f"/Microsoft.Authorization/roleDefinitions/{role_def_id}" - ), - principal_id=principal_id, - principal_type=PrincipalType.SERVICE_PRINCIPAL - ) - auth_client.role_assignments.create( - scope=storage_account_id, - role_assignment_name=str(uuid4()), - parameters=role_assignment_params - ) - print( - f" ✓ Created {role_name} role for {principal_type_name} " - f"{principal_id[:8]}..." 
- ) - return f"{principal_id[:8]} - {role_name}", False - print( - f" ✓ {role_name} role already exists for {principal_type_name} " - f"{principal_id[:8]}" - ) - return f"{principal_id[:8]} - {role_name} (existing)", True - - -def _verify_role_assignments(auth_client, storage_account_id, - expected_principal_ids): - """Verify that role assignments were created successfully.""" - print("Verifying role assignments...") - all_assignments = list( - auth_client.role_assignments.list_for_scope( - scope=storage_account_id - ) - ) - verified_principals = set() - - for assignment in all_assignments: - principal_id = assignment.principal_id - if principal_id in expected_principal_ids: - verified_principals.add(principal_id) - role_id = assignment.role_definition_id.split('/')[-1] - role_display = _get_role_name(role_id) - print( - f" ✓ Verified {role_display} for principal " - f"{principal_id[:8]}" - ) - - missing_principals = set(expected_principal_ids) - verified_principals - if missing_principals: - print( - f"WARNING: {len(missing_principals)} principal(s) missing role " - f"assignments: " - ) - for principal in missing_principals: - print(f" - {principal}") - - -def grant_storage_permissions(cmd, storage_account_id, source_dra, - target_dra, replication_vault, subscription_id): - """Grant role assignments for DRAs and vault identity to storage acct.""" - from azure.mgmt.authorization import AuthorizationManagementClient - - # Get role assignment client - from azure.cli.core.commands.client_factory import ( - get_mgmt_service_client - ) - auth_client = get_mgmt_service_client( - cmd.cli_ctx, AuthorizationManagementClient - ) - - source_dra_object_id = ( - source_dra.get('properties', {}) - .get('resourceAccessIdentity', {}).get('objectId') - ) - target_dra_object_id = ( - target_dra.get('properties', {}) - .get('resourceAccessIdentity', {}).get('objectId') - ) - - # Get vault identity from either root level or properties level - vault_identity = ( - replication_vault.get('identity') or - replication_vault.get('properties', {}).get('identity') - ) - vault_identity_id = ( - vault_identity.get('principalId') if vault_identity else None - ) - - print("Granting permissions to the storage account...") - print(f" Source DRA Principal ID: {source_dra_object_id}") - print(f" Target DRA Principal ID: {target_dra_object_id}") - print(f" Vault Identity Principal ID: {vault_identity_id}") - - successful_assignments = [] - failed_assignments = [] - - # Create role assignments for source and target DRAs - for object_id in [source_dra_object_id, target_dra_object_id]: - if object_id: - for role_def_id in [ - RoleDefinitionIds.ContributorId, - RoleDefinitionIds.StorageBlobDataContributorId - ]: - try: - assignment_msg, _ = _assign_role_to_principal( - auth_client, storage_account_id, subscription_id, - object_id, role_def_id, "DRA" - ) - successful_assignments.append(assignment_msg) - except CLIError as e: - role_name = _get_role_name(role_def_id) - error_msg = f"{object_id[:8]} - {role_name}: {str(e)}" - failed_assignments.append(error_msg) - - # Grant vault identity permissions if exists - if vault_identity_id: - for role_def_id in [RoleDefinitionIds.ContributorId, - RoleDefinitionIds.StorageBlobDataContributorId]: - try: - assignment_msg, _ = _assign_role_to_principal( - auth_client, storage_account_id, subscription_id, - vault_identity_id, role_def_id, "vault" - ) - successful_assignments.append(assignment_msg) - except CLIError as e: - role_name = _get_role_name(role_def_id) - error_msg = 
f"{vault_identity_id[:8]} - {role_name}: {str(e)}" - failed_assignments.append(error_msg) - - # Report role assignment status - print("\nRole Assignment Summary:") - print(f" Successful: {len(successful_assignments)}") - if failed_assignments: - print(f" Failed: {len(failed_assignments)}") - for failure in failed_assignments: - print(f" - {failure}") - - # If there are failures, raise an error - if failed_assignments: - raise CLIError( - f"Failed to create {len(failed_assignments)} role " - f"assignment(s). " - "The storage account may not have proper permissions." - ) - - # Add a wait after role assignments to ensure propagation - time.sleep(120) - - # Verify role assignments were successful - expected_principal_ids = [ - source_dra_object_id, target_dra_object_id, vault_identity_id - ] - _verify_role_assignments( - auth_client, storage_account_id, expected_principal_ids - ) - - -def update_amh_solution_storage(cmd, - project_uri, - amh_solution, - storage_account_id): - """Update AMH solution with storage account ID if needed.""" - amh_solution_uri = ( - f"{project_uri}/solutions/" - f"Servers-Migration-ServerMigration_DataReplication" - ) - - if (amh_solution - .get('properties', {}) - .get('details', {}) - .get('extendedDetails', {}) - .get('replicationStorageAccountId')) != storage_account_id: - extended_details = (amh_solution - .get('properties', {}) - .get('details', {}) - .get('extendedDetails', {})) - extended_details['replicationStorageAccountId'] = ( - storage_account_id - ) - - solution_body = { - "properties": { - "details": { - "extendedDetails": extended_details - } - } - } - - create_or_update_resource( - cmd, amh_solution_uri, APIVersion.Microsoft_Migrate.value, - solution_body - ) - - # Wait for the AMH solution update to fully propagate - time.sleep(60) - - return amh_solution_uri - - -def get_or_check_existing_extension(cmd, extension_uri, - replication_extension_name, - storage_account_id): - """Get existing extension and check if it's in a good state.""" - # Try to get existing extension, handle not found gracefully - try: - replication_extension = get_resource_by_id( - cmd, extension_uri, APIVersion.Microsoft_DataReplication.value - ) - except CLIError as e: - error_str = str(e) - if ("ResourceNotFound" in error_str or "404" in error_str or - "Not Found" in error_str): - # Extension doesn't exist, this is expected for new setups - print( - f"Extension '{replication_extension_name}' does not exist, " - f"will create it." - ) - return None, False - # Some other error occurred, re-raise it - raise - - # Check if extension exists and is in good state - if replication_extension: - existing_state = ( - replication_extension.get('properties', {}) - .get('provisioningState') - ) - existing_storage_id = (replication_extension - .get('properties', {}) - .get('customProperties', {}) - .get('storageAccountId')) - - print( - f"Found existing extension '{replication_extension_name}' in " - f"state: {existing_state}" - ) - - # If it's succeeded with the correct storage account, we're done - if (existing_state == ProvisioningState.Succeeded.value and - existing_storage_id == storage_account_id): - print( - "Replication Extension already exists with correct " - "configuration." 
- ) - print("Successfully initialized replication infrastructure") - return None, True # Signal that we're done - - # If it's in a bad state or has wrong storage account, delete it - if (existing_state in [ProvisioningState.Failed.value, - ProvisioningState.Canceled.value] or - existing_storage_id != storage_account_id): - print(f"Removing existing extension (state: {existing_state})") - delete_resource( - cmd, extension_uri, APIVersion.Microsoft_DataReplication.value - ) - time.sleep(120) - return None, False - - return replication_extension, False - - -def verify_extension_prerequisites(cmd, rg_uri, replication_vault_name, - instance_type, storage_account_id, - amh_solution_uri, source_fabric_id, - target_fabric_id): - """Verify all prerequisites before creating extension.""" - print("\nVerifying prerequisites before creating extension...") - - # 1. Verify policy is succeeded - policy_name = f"{replication_vault_name}{instance_type}policy" - policy_uri = ( - f"{rg_uri}/providers/Microsoft.DataReplication/replicationVaults" - f"/{replication_vault_name}/replicationPolicies/{policy_name}" - ) - policy_check = get_resource_by_id( - cmd, policy_uri, APIVersion.Microsoft_DataReplication.value) - if (policy_check.get('properties', {}).get('provisioningState') != - ProvisioningState.Succeeded.value): - raise CLIError( - "Policy is not in Succeeded state: {}".format( - policy_check.get('properties', {}).get('provisioningState'))) - - # 2. Verify storage account is succeeded - storage_account_name = storage_account_id.split("/")[-1] - storage_uri = ( - f"{rg_uri}/providers/Microsoft.Storage/storageAccounts/" - f"{storage_account_name}") - storage_check = get_resource_by_id( - cmd, storage_uri, APIVersion.Microsoft_Storage.value) - if (storage_check - .get('properties', {}) - .get('provisioningState') != - StorageAccountProvisioningState.Succeeded.value): - raise CLIError( - "Storage account is not in Succeeded state: {}".format( - storage_check.get('properties', {}).get( - 'provisioningState'))) - - # 3. Verify AMH solution has storage account - solution_check = get_resource_by_id( - cmd, amh_solution_uri, APIVersion.Microsoft_Migrate.value) - if (solution_check - .get('properties', {}) - .get('details', {}) - .get('extendedDetails', {}) - .get('replicationStorageAccountId') != storage_account_id): - raise CLIError( - "AMH solution doesn't have the correct storage account ID") - - # 4. 
Verify fabrics are responsive - source_fabric_check = get_resource_by_id( - cmd, source_fabric_id, APIVersion.Microsoft_DataReplication.value) - if (source_fabric_check.get('properties', {}).get('provisioningState') != - ProvisioningState.Succeeded.value): - raise CLIError("Source fabric is not in Succeeded state") - - target_fabric_check = get_resource_by_id( - cmd, target_fabric_id, APIVersion.Microsoft_DataReplication.value) - if (target_fabric_check.get('properties', {}).get('provisioningState') != - ProvisioningState.Succeeded.value): - raise CLIError("Target fabric is not in Succeeded state") - - print("All prerequisites verified successfully!") - time.sleep(30) - - -def list_existing_extensions(cmd, rg_uri, replication_vault_name): - """List existing extensions for informational purposes.""" - existing_extensions_uri = ( - f"{rg_uri}/providers/Microsoft.DataReplication" - f"/replicationVaults/{replication_vault_name}" - f"/replicationExtensions" - f"?api-version={APIVersion.Microsoft_DataReplication.value}" - ) - try: - existing_extensions_response = send_get_request( - cmd, existing_extensions_uri) - existing_extensions = ( - existing_extensions_response.json().get('value', [])) - if existing_extensions: - print(f"Found {len(existing_extensions)} existing " - f"extension(s): ") - for ext in existing_extensions: - ext_name = ext.get('name') - ext_state = ( - ext.get('properties', {}).get('provisioningState')) - ext_type = (ext.get('properties', {}) - .get('customProperties', {}) - .get('instanceType')) - print(f" - {ext_name}: state={ext_state}, " - f"type={ext_type}") - else: - print("No existing extensions found") - except CLIError as list_error: - # If listing fails, it might mean no extensions exist at all - print(f"Could not list extensions (this is normal for new " - f"projects): {str(list_error)}") - - -def build_extension_body(instance_type, source_fabric_id, - target_fabric_id, storage_account_id): - """Build the extension body based on instance type.""" - print("\n=== Creating extension for replication infrastructure ===") - print(f"Instance Type: {instance_type}") - print(f"Source Fabric ID: {source_fabric_id}") - print(f"Target Fabric ID: {target_fabric_id}") - print(f"Storage Account ID: {storage_account_id}") - - # Build the extension body with properties in the exact order from - # the working API call - if instance_type == AzLocalInstanceTypes.VMwareToAzLocal.value: - # Match exact property order from working call for VMware - extension_body = { - "properties": { - "customProperties": { - "azStackHciFabricArmId": target_fabric_id, - "storageAccountId": storage_account_id, - "storageAccountSasSecretName": None, - "instanceType": instance_type, - "vmwareFabricArmId": source_fabric_id - } - } - } - elif instance_type == AzLocalInstanceTypes.HyperVToAzLocal.value: - # For HyperV, use similar order but with hyperVFabricArmId - extension_body = { - "properties": { - "customProperties": { - "azStackHciFabricArmId": target_fabric_id, - "storageAccountId": storage_account_id, - "storageAccountSasSecretName": None, - "instanceType": instance_type, - "hyperVFabricArmId": source_fabric_id - } - } - } - else: - raise CLIError(f"Unsupported instance type: {instance_type}") - - # Debug: Print the exact body being sent - body_str = json.dumps(extension_body, indent=2) - print(f"Extension body being sent: \n{body_str}") - - return extension_body - - -def _wait_for_extension_creation(cmd, extension_uri): - """Wait for extension creation to complete.""" - for i in range(20): - 
time.sleep(30) - try: - api_version = APIVersion.Microsoft_DataReplication.value - replication_extension = get_resource_by_id( - cmd, extension_uri, api_version) - if replication_extension: - ext_state = replication_extension.get( - 'properties', {}).get('provisioningState') - print(f"Extension state: {ext_state}") - if ext_state in [ProvisioningState.Succeeded.value, - ProvisioningState.Failed.value, - ProvisioningState.Canceled.value]: - break - except CLIError: - print(f"Waiting for extension... ({i + 1}/20)") - - -def _handle_extension_creation_error(cmd, extension_uri, create_error): - """Handle errors during extension creation.""" - error_str = str(create_error) - print(f"Error during extension creation: {error_str}") - - # Check if extension was created despite the error - time.sleep(30) - try: - api_version = APIVersion.Microsoft_DataReplication.value - replication_extension = get_resource_by_id( - cmd, extension_uri, api_version) - if replication_extension: - print( - f"Extension exists despite error, " - f"state: {replication_extension.get('properties', {}).get('provisioningState')}" - ) - except CLIError: - replication_extension = None - - if not replication_extension: - raise CLIError( - f"Failed to create replication extension: " - f"{str(create_error)}") from create_error - - -def create_replication_extension(cmd, extension_uri, extension_body): - """Create the replication extension and wait for it to complete.""" - try: - result = create_or_update_resource( - cmd, extension_uri, - APIVersion.Microsoft_DataReplication.value, - extension_body) - if result: - print("Extension creation initiated successfully") - # Wait for the extension to be created - print("Waiting for extension creation to complete...") - _wait_for_extension_creation(cmd, extension_uri) - except CLIError as create_error: - _handle_extension_creation_error(cmd, extension_uri, create_error) - - -def setup_replication_extension(cmd, rg_uri, replication_vault_name, - source_fabric, target_fabric, - instance_type, storage_account_id, - amh_solution_uri, pass_thru): - """Setup replication extension - main orchestration function.""" - # Setup Replication Extension - source_fabric_id = source_fabric['id'] - target_fabric_id = target_fabric['id'] - source_fabric_short_name = source_fabric_id.split('/')[-1] - target_fabric_short_name = target_fabric_id.split('/')[-1] - replication_extension_name = ( - f"{source_fabric_short_name}-{target_fabric_short_name}-" - f"MigReplicationExtn") - - extension_uri = ( - f"{rg_uri}/providers/Microsoft.DataReplication/" - f"replicationVaults/{replication_vault_name}/" - f"replicationExtensions/{replication_extension_name}" - ) - - # Get or check existing extension - replication_extension, is_complete = get_or_check_existing_extension( - cmd, extension_uri, replication_extension_name, - storage_account_id - ) - - if is_complete: - return True if pass_thru else None - - # Verify prerequisites - verify_extension_prerequisites( - cmd, rg_uri, replication_vault_name, instance_type, - storage_account_id, amh_solution_uri, source_fabric_id, - target_fabric_id - ) - - # Create extension if needed - if not replication_extension: - print( - f"Creating Replication Extension " - f"'{replication_extension_name}'...") - - # List existing extensions for context - list_existing_extensions(cmd, rg_uri, replication_vault_name) - - # Build extension body - extension_body = build_extension_body( - instance_type, source_fabric_id, target_fabric_id, - storage_account_id - ) - - # Create the extension - 
create_replication_extension(cmd, extension_uri, extension_body) - - print("Successfully initialized replication infrastructure") - return True if pass_thru else None - - -def setup_project_and_solutions(cmd, - subscription_id, - resource_group_name, - project_name): - """Setup and retrieve project and solutions.""" - rg_uri = get_and_validate_resource_group( - cmd, subscription_id, resource_group_name) - project_uri = (f"{rg_uri}/providers/Microsoft.Migrate/migrateprojects/" - f"{project_name}") - migrate_project = get_migrate_project(cmd, project_uri, project_name) - amh_solution = get_data_replication_solution(cmd, project_uri) - discovery_solution = get_discovery_solution(cmd, project_uri) - - return ( - rg_uri, - project_uri, - migrate_project, - amh_solution, - discovery_solution - ) - - -def setup_appliances_and_types(discovery_solution, - source_appliance_name, - target_appliance_name): - """Parse appliance mappings and determine instance types.""" - app_map = parse_appliance_mappings(discovery_solution) - source_site_id, target_site_id = validate_and_get_site_ids( - app_map, source_appliance_name, target_appliance_name - ) - result = determine_instance_types( - source_site_id, target_site_id, source_appliance_name, - target_appliance_name - ) - instance_type, fabric_instance_type = result - return ( - source_site_id, - instance_type, - fabric_instance_type - ) - - -def setup_fabrics_and_dras(cmd, rg_uri, resource_group_name, - source_appliance_name, target_appliance_name, - project_name, fabric_instance_type, - amh_solution): - """Get all fabrics and set up DRAs.""" - all_fabrics, replication_fabrics_uri = get_all_fabrics( - cmd, rg_uri, resource_group_name, source_appliance_name, - target_appliance_name, project_name - ) - - source_fabric = find_fabric( - all_fabrics, source_appliance_name, fabric_instance_type, - amh_solution, is_source=True) - target_fabric_instance_type = FabricInstanceTypes.AzLocalInstance.value - target_fabric = find_fabric( - all_fabrics, target_appliance_name, target_fabric_instance_type, - amh_solution, is_source=False) - - source_dra = get_fabric_agent( - cmd, replication_fabrics_uri, source_fabric, - source_appliance_name, fabric_instance_type) - target_dra = get_fabric_agent( - cmd, replication_fabrics_uri, target_fabric, - target_appliance_name, target_fabric_instance_type) - - return source_fabric, target_fabric, source_dra, target_dra - - -def setup_storage_and_permissions(cmd, rg_uri, amh_solution, - cache_storage_account_id, source_site_id, - source_appliance_name, migrate_project, - project_name, source_dra, target_dra, - replication_vault, subscription_id): - """Setup storage account and grant permissions.""" - cache_storage_account = setup_cache_storage_account( - cmd, rg_uri, amh_solution, cache_storage_account_id, - source_site_id, source_appliance_name, migrate_project, project_name - ) - - storage_account_id = cache_storage_account['id'] - verify_storage_account_network_settings( - cmd, rg_uri, cache_storage_account) - grant_storage_permissions( - cmd, storage_account_id, source_dra, target_dra, - replication_vault, subscription_id) - - return storage_account_id - - -def initialize_infrastructure_components(cmd, rg_uri, project_uri, - amh_solution, - replication_vault_name, - instance_type, migrate_project, - project_name, - cache_storage_account_id, - source_site_id, - source_appliance_name, source_dra, - target_dra, replication_vault, - subscription_id): - """Initialize policy, storage, and AMH solution.""" - setup_replication_policy( - 
cmd, rg_uri, replication_vault_name, instance_type) - - storage_account_id = setup_storage_and_permissions( - cmd, rg_uri, amh_solution, cache_storage_account_id, - source_site_id, source_appliance_name, migrate_project, project_name, - source_dra, target_dra, replication_vault, subscription_id - ) - - amh_solution_uri = update_amh_solution_storage( - cmd, project_uri, amh_solution, storage_account_id) - - return storage_account_id, amh_solution_uri - - -def execute_replication_infrastructure_setup(cmd, subscription_id, - resource_group_name, - project_name, - source_appliance_name, - target_appliance_name, - cache_storage_account_id, - pass_thru): - """Execute the complete replication infrastructure setup workflow.""" - # Setup project and solutions - (rg_uri, project_uri, migrate_project, amh_solution, - discovery_solution) = setup_project_and_solutions( - cmd, subscription_id, resource_group_name, project_name - ) - - # Get and setup replication vault - (replication_vault, - replication_vault_name) = get_and_setup_replication_vault( - cmd, amh_solution, rg_uri) - - # Setup appliances and determine types - (source_site_id, instance_type, - fabric_instance_type) = setup_appliances_and_types( - discovery_solution, source_appliance_name, target_appliance_name - ) - - # Setup fabrics and DRAs - (source_fabric, target_fabric, source_dra, - target_dra) = setup_fabrics_and_dras( - cmd, rg_uri, resource_group_name, source_appliance_name, - target_appliance_name, project_name, fabric_instance_type, - amh_solution - ) - - # Initialize policy, storage, and AMH solution - (storage_account_id, - amh_solution_uri) = initialize_infrastructure_components( - cmd, rg_uri, project_uri, amh_solution, replication_vault_name, - instance_type, migrate_project, project_name, - cache_storage_account_id, source_site_id, source_appliance_name, - source_dra, target_dra, replication_vault, subscription_id - ) - - # Setup Replication Extension - return setup_replication_extension( - cmd, rg_uri, replication_vault_name, source_fabric, - target_fabric, instance_type, storage_account_id, - amh_solution_uri, pass_thru - ) diff --git a/src/migrate/azext_migrate/_params.py b/src/migrate/azext_migrate/_params.py index e713ee24500..5c23358dc69 100644 --- a/src/migrate/azext_migrate/_params.py +++ b/src/migrate/azext_migrate/_params.py @@ -26,11 +26,11 @@ def load_arguments(self, _): with self.argument_context('migrate') as c: c.argument('subscription_id', subscription_id_type) - with self.argument_context('migrate local get-discovered-server') as c: + with self.argument_context('migrate get-discovered-server') as c: c.argument('project_name', project_name_type, required=True) c.argument( - 'resource_group_name', - options_list=['--resource-group-name', '--resource-group', '-g'], + 'resource_group', + options_list=['--resource-group', '-g'], help='Name of the resource group containing the Azure Migrate ' 'project.', required=True) @@ -50,8 +50,8 @@ def load_arguments(self, _): with self.argument_context('migrate local replication init') as c: c.argument( - 'resource_group_name', - options_list=['--resource-group-name', '--resource-group', '-g'], + 'resource_group', + options_list=['--resource-group', '-g'], help='Specifies the Resource Group of the Azure Migrate ' 'Project.', required=True) @@ -106,8 +106,8 @@ def load_arguments(self, _): help='Name of the Azure Migrate project. 
Required when using ' '--machine-index.') c.argument( - 'resource_group_name', - options_list=['--resource-group-name', '--resource-group', '-g'], + 'resource_group', + options_list=['--resource-group', '-g'], help='Name of the resource group containing the Azure Migrate ' 'project. Required when using --machine-index.') c.argument( @@ -183,3 +183,39 @@ def load_arguments(self, _): 'scenario.', required=True) c.argument('subscription_id', subscription_id_type) + + with self.argument_context('migrate local replication remove') as c: + c.argument( + 'target_object_id', + options_list=['--target-object-id', '--id'], + help='Specifies the replicating server ARM ID for which ' + 'replication needs to be disabled. The ID should be ' + 'retrieved using the get command.') + c.argument( + 'force_remove', + options_list=['--force-remove', '--force'], + arg_type=get_three_state_flag(), + help='Specifies whether the replication needs to be force ' + 'removed. Default is false.') + c.argument('subscription_id', subscription_id_type) + + with self.argument_context('migrate local replication get-job') as c: + c.argument( + 'job_id', + options_list=['--job-id', '--id'], + help='Specifies the job ARM ID for which the details need to ' + 'be retrieved.') + c.argument( + 'resource_group', + options_list=['--resource-group', '-g'], + help='The name of the resource group where the recovery ' + 'services vault is present.') + c.argument( + 'project_name', + project_name_type, + help='The name of the migrate project.') + c.argument( + 'job_name', + options_list=['--job-name', '--name'], + help='Job identifier.') + c.argument('subscription_id', subscription_id_type) diff --git a/src/migrate/azext_migrate/commands.py b/src/migrate/azext_migrate/commands.py index 7c94169edaf..12c97a2ce5d 100644 --- a/src/migrate/azext_migrate/commands.py +++ b/src/migrate/azext_migrate/commands.py @@ -6,9 +6,11 @@ def load_command_table(self, _): # Azure Local Migration Commands - with self.command_group('migrate local') as g: + with self.command_group('migrate') as g: g.custom_command('get-discovered-server', 'get_discovered_server') with self.command_group('migrate local replication') as g: g.custom_command('init', 'initialize_replication_infrastructure') g.custom_command('new', 'new_local_server_replication') + g.custom_command('remove', 'remove_local_server_replication') + g.custom_command('get-job', 'get_local_replication_job') diff --git a/src/migrate/azext_migrate/custom.py b/src/migrate/azext_migrate/custom.py index cd363dcffd6..21ae489b020 100644 --- a/src/migrate/azext_migrate/custom.py +++ b/src/migrate/azext_migrate/custom.py @@ -5,7 +5,7 @@ from knack.util import CLIError from knack.log import get_logger -from azext_migrate._helpers import ( +from azext_migrate.helpers._utils import ( send_get_request, ) @@ -14,7 +14,7 @@ def get_discovered_server(cmd, project_name, - resource_group_name, + resource_group, display_name=None, source_machine_type=None, subscription_id=None, @@ -26,7 +26,7 @@ def get_discovered_server(cmd, Args: cmd: The CLI command context project_name (str): Specifies the migrate project name (required) - resource_group_name (str): Specifies the resource group name + resource_group (str): Specifies the resource group name (required) display_name (str, optional): Specifies the source machine display name @@ -45,8 +45,8 @@ def get_discovered_server(cmd, CLIError: If required parameters are missing or the API request fails """ - from azext_migrate._helpers import APIVersion - from 
azext_migrate._get_discovered_server_helpers import ( + from azext_migrate.helpers._utils import APIVersion + from azext_migrate.helpers._server import ( validate_get_discovered_server_params, build_base_uri, fetch_all_servers, @@ -56,7 +56,7 @@ def get_discovered_server(cmd, # Validate required parameters validate_get_discovered_server_params( - project_name, resource_group_name, source_machine_type) + project_name, resource_group, source_machine_type) # Use current subscription if not provided if not subscription_id: @@ -66,7 +66,7 @@ def get_discovered_server(cmd, # Build the base URI base_uri = build_base_uri( - subscription_id, resource_group_name, project_name, + subscription_id, resource_group, project_name, appliance_name, name, source_machine_type) # Use the correct API version @@ -105,7 +105,7 @@ def get_discovered_server(cmd, def initialize_replication_infrastructure(cmd, - resource_group_name, + resource_group, project_name, source_appliance_name, target_appliance_name, @@ -120,7 +120,7 @@ def initialize_replication_infrastructure(cmd, Args: cmd: The CLI command context - resource_group_name (str): Specifies the Resource Group of the + resource_group (str): Specifies the Resource Group of the Azure Migrate Project (required) project_name (str): Specifies the name of the Azure Migrate project to be used for server migration (required) @@ -145,12 +145,14 @@ def initialize_replication_infrastructure(cmd, """ from azure.cli.core.commands.client_factory import \ get_subscription_id - from azext_migrate._initialize_replication_infrastructure_helpers import ( - validate_required_parameters, + from azext_migrate.helpers.replication.init._execute_init import ( execute_replication_infrastructure_setup) + from azext_migrate.helpers.replication.init._validate import ( + validate_required_parameters, + ) # Validate required parameters - validate_required_parameters(resource_group_name, + validate_required_parameters(resource_group, project_name, source_appliance_name, target_appliance_name) @@ -163,7 +165,7 @@ def initialize_replication_infrastructure(cmd, # Execute the complete setup workflow return execute_replication_infrastructure_setup( - cmd, subscription_id, resource_group_name, project_name, + cmd, subscription_id, resource_group, project_name, source_appliance_name, target_appliance_name, cache_storage_account_id, pass_thru ) @@ -185,7 +187,7 @@ def new_local_server_replication(cmd, machine_id=None, machine_index=None, project_name=None, - resource_group_name=None, + resource_group=None, target_vm_cpu_core=None, target_virtual_switch_id=None, target_test_virtual_switch_id=None, @@ -222,7 +224,7 @@ def new_local_server_replication(cmd, machine_id not provided) project_name (str, optional): Specifies the migrate project name (required when using machine_index) - resource_group_name (str, optional): Specifies the resource group + resource_group (str, optional): Specifies the resource group name (required when using machine_index) target_vm_cpu_core (int, optional): Specifies the number of CPU cores @@ -252,11 +254,15 @@ def new_local_server_replication(cmd, Raises: CLIError: If required parameters are missing or validation fails """ - from azext_migrate._helpers import SiteTypes - from azext_migrate._new_local_server_replication_helpers import ( + from azext_migrate.helpers._utils import SiteTypes + from azext_migrate.helpers.replication.new._validate import ( validate_server_parameters, validate_required_parameters, validate_ARM_id_formats, + validate_replication_extension, + 
validate_target_VM_name + ) + from azext_migrate.helpers.replication.new._process_inputs import ( process_site_type_hyperV, process_site_type_vmware, process_amh_solution, @@ -264,19 +270,20 @@ def new_local_server_replication(cmd, process_replication_policy, process_appliance_map, process_source_fabric, - process_target_fabric, - validate_replication_extension, + process_target_fabric + ) + from azext_migrate.helpers.replication.new._execute_new import ( get_ARC_resource_bridge_info, - validate_target_VM_name, construct_disk_and_nic_mapping, - create_protected_item) + create_protected_item + ) - rg_uri = validate_server_parameters( + rg_uri, machine_id = validate_server_parameters( cmd, machine_id, machine_index, project_name, - resource_group_name, + resource_group, source_appliance_name, subscription_id) @@ -450,3 +457,134 @@ def new_local_server_replication(cmd, except Exception as e: logger.error("Error creating replication: %s", str(e)) raise + + +def get_local_replication_job(cmd, + job_id=None, + resource_group=None, + project_name=None, + job_name=None, + subscription_id=None): + """ + Retrieve the status of an Azure Migrate job. + + This cmdlet is based on a preview API version and may experience + breaking changes in future releases. + + Args: + cmd: The CLI command context + job_id (str, optional): Specifies the job ARM ID for which + the details need to be retrieved + resource_group (str, optional): The name of the resource + group where the recovery services vault is present + project_name (str, optional): The name of the migrate project + job_name (str, optional): Job identifier/name + subscription_id (str, optional): Azure Subscription ID. Uses + current subscription if not provided + + Returns: + dict or list: Job details (single job or list of jobs) + + Raises: + CLIError: If required parameters are missing or the job is not found + """ + from azure.cli.core.commands.client_factory import \ + get_subscription_id + from azext_migrate.helpers.replication.job._parse import ( + parse_job_id, + get_vault_name_from_project + ) + from azext_migrate.helpers.replication.job._retrieve import ( + get_single_job, + list_all_jobs + ) + from azext_migrate.helpers.replication.job._format import ( + format_job_output, + format_job_summary + ) + + # Use current subscription if not provided + if not subscription_id: + subscription_id = get_subscription_id(cmd.cli_ctx) + + # Determine the operation mode based on provided parameters + if job_id: + # Mode: Get job by ID + vault_name, resource_group_name, job_name = \ + parse_job_id(job_id) + elif resource_group and project_name: + # Mode: Get job by name or list jobs + vault_name = get_vault_name_from_project( + cmd, resource_group, project_name, subscription_id) + resource_group_name = resource_group + else: + raise CLIError( + "Either --job-id or both --resource-group and " + "--project-name must be provided.") + + # Get a specific job or list all jobs + if job_name: + return get_single_job( + cmd, subscription_id, resource_group_name, + vault_name, job_name, format_job_output) + + return list_all_jobs( + cmd, subscription_id, resource_group_name, + vault_name, format_job_summary) + + +def remove_local_server_replication(cmd, + target_object_id, + force_remove=False, + subscription_id=None): + """ + Stop replication for a migrated server. + + This cmdlet is based on a preview API version and may experience + breaking changes in future releases. 
+ + Args: + cmd: The CLI command context + target_object_id (str): Specifies the replicating server ARM ID + for which replication needs to be disabled (required) + force_remove (bool, optional): Specifies whether the replication + needs to be force removed. Default is False + subscription_id (str, optional): Azure Subscription ID. Uses + current subscription if not provided + + Returns: + dict: The job model from the API response + + Raises: + CLIError: If the protected item is not found or cannot be + removed in its current state + """ + from azure.cli.core.commands.client_factory import \ + get_subscription_id + from azext_migrate.helpers.replication.remove._parse import ( + parse_protected_item_id + ) + from azext_migrate.helpers.replication.remove._validate import ( + validate_protected_item + ) + from azext_migrate.helpers.replication.remove._execute_delete import ( + execute_removal + ) + + # Use current subscription if not provided + if not subscription_id: + subscription_id = get_subscription_id(cmd.cli_ctx) + + # Parse the protected item ID to extract components + resource_group_name, vault_name, protected_item_name = \ + parse_protected_item_id(target_object_id) + + # Validate the protected item exists and can be removed + validate_protected_item(cmd, target_object_id) + + # Execute the removal workflow + return execute_removal( + cmd, subscription_id, target_object_id, + resource_group_name, vault_name, + protected_item_name, force_remove + ) diff --git a/src/migrate/azext_migrate/_get_discovered_server_helpers.py b/src/migrate/azext_migrate/helpers/_server.py similarity index 96% rename from src/migrate/azext_migrate/_get_discovered_server_helpers.py rename to src/migrate/azext_migrate/helpers/_server.py index d001e19af4d..88e6b4d4257 100644 --- a/src/migrate/azext_migrate/_get_discovered_server_helpers.py +++ b/src/migrate/azext_migrate/helpers/_server.py @@ -93,6 +93,7 @@ def extract_server_info(server, index): # Default values machine_name = "N/A" + machine_id = "N/A" ip_addresses_str = 'N/A' os_name = "N/A" boot_type = "N/A" @@ -101,6 +102,7 @@ def extract_server_info(server, index): if discovery_data: latest_discovery = discovery_data[0] machine_name = latest_discovery.get('machineName', 'N/A') + machine_id = server.get('id', 'N/A') ip_addresses = latest_discovery.get('ipAddresses', []) ip_addresses_str = ', '.join(ip_addresses) if ip_addresses else 'N/A' os_name = latest_discovery.get('osName', 'N/A') @@ -116,6 +118,7 @@ def extract_server_info(server, index): return { 'index': index, 'machine_name': machine_name, + 'machine_id': machine_id, 'ip_addresses': ip_addresses_str, 'operating_system': os_name, 'boot_type': boot_type, @@ -128,6 +131,8 @@ def print_server_info(server_info): index_str = f"[{server_info['index']}]" print(f"{index_str} Machine Name: " f"{server_info['machine_name']}") + print(f"{' ' * len(index_str)} Machine Id: " + f"{server_info['machine_id']}") print(f"{' ' * len(index_str)} IP Addresses: " f"{server_info['ip_addresses']}") print(f"{' ' * len(index_str)} Operating System: " diff --git a/src/migrate/azext_migrate/_helpers.py b/src/migrate/azext_migrate/helpers/_utils.py similarity index 100% rename from src/migrate/azext_migrate/_helpers.py rename to src/migrate/azext_migrate/helpers/_utils.py diff --git a/src/migrate/azext_migrate/helpers/replication/init/_execute_init.py b/src/migrate/azext_migrate/helpers/replication/init/_execute_init.py new file mode 100644 index 00000000000..0853b17a59b --- /dev/null +++ 
b/src/migrate/azext_migrate/helpers/replication/init/_execute_init.py @@ -0,0 +1,200 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +from azext_migrate.helpers._utils import ( + FabricInstanceTypes +) +from azext_migrate.helpers.replication.init._validate import ( + get_and_validate_resource_group, + get_migrate_project, + get_data_replication_solution, + get_discovery_solution, + get_and_setup_replication_vault, + parse_appliance_mappings, + validate_and_get_site_ids +) +from azext_migrate.helpers.replication.init._setup_policy import ( + determine_instance_types, + find_fabric, + get_fabric_agent, + setup_replication_policy, + setup_cache_storage_account, + verify_storage_account_network_settings, + get_all_fabrics +) +from azext_migrate.helpers.replication.init._setup_permissions import ( + grant_storage_permissions, + update_amh_solution_storage +) +from azext_migrate.helpers.replication.init._setup_extension import ( + setup_replication_extension +) + + +def setup_project_and_solutions(cmd, + subscription_id, + resource_group_name, + project_name): + """Setup and retrieve project and solutions.""" + rg_uri = get_and_validate_resource_group( + cmd, subscription_id, resource_group_name) + project_uri = (f"{rg_uri}/providers/Microsoft.Migrate/migrateprojects/" + f"{project_name}") + migrate_project = get_migrate_project(cmd, project_uri, project_name) + amh_solution = get_data_replication_solution(cmd, project_uri) + discovery_solution = get_discovery_solution(cmd, project_uri) + + return ( + rg_uri, + project_uri, + migrate_project, + amh_solution, + discovery_solution + ) + + +def setup_appliances_and_types(discovery_solution, + source_appliance_name, + target_appliance_name): + """Parse appliance mappings and determine instance types.""" + app_map = parse_appliance_mappings(discovery_solution) + source_site_id, target_site_id = validate_and_get_site_ids( + app_map, source_appliance_name, target_appliance_name + ) + result = determine_instance_types( + source_site_id, target_site_id, source_appliance_name, + target_appliance_name + ) + instance_type, fabric_instance_type = result + return ( + source_site_id, + instance_type, + fabric_instance_type + ) + + +def setup_fabrics_and_dras(cmd, rg_uri, resource_group_name, + source_appliance_name, target_appliance_name, + project_name, fabric_instance_type, + amh_solution): + """Get all fabrics and set up DRAs.""" + all_fabrics, replication_fabrics_uri = get_all_fabrics( + cmd, rg_uri, resource_group_name, source_appliance_name, + target_appliance_name, project_name + ) + + source_fabric = find_fabric( + all_fabrics, source_appliance_name, fabric_instance_type, + amh_solution, is_source=True) + target_fabric_instance_type = FabricInstanceTypes.AzLocalInstance.value + target_fabric = find_fabric( + all_fabrics, target_appliance_name, target_fabric_instance_type, + amh_solution, is_source=False) + + source_dra = get_fabric_agent( + cmd, replication_fabrics_uri, source_fabric, + source_appliance_name, fabric_instance_type) + target_dra = get_fabric_agent( + cmd, replication_fabrics_uri, target_fabric, + target_appliance_name, target_fabric_instance_type) + + return source_fabric, target_fabric, source_dra, target_dra + + +def 
setup_storage_and_permissions(cmd, rg_uri, amh_solution, + cache_storage_account_id, source_site_id, + source_appliance_name, migrate_project, + project_name, source_dra, target_dra, + replication_vault, subscription_id): + """Setup storage account and grant permissions.""" + cache_storage_account = setup_cache_storage_account( + cmd, rg_uri, amh_solution, cache_storage_account_id, + source_site_id, source_appliance_name, migrate_project, project_name + ) + + storage_account_id = cache_storage_account['id'] + verify_storage_account_network_settings( + cmd, rg_uri, cache_storage_account) + grant_storage_permissions( + cmd, storage_account_id, source_dra, target_dra, + replication_vault, subscription_id) + + return storage_account_id + + +def initialize_infrastructure_components(cmd, rg_uri, project_uri, + amh_solution, + replication_vault_name, + instance_type, migrate_project, + project_name, + cache_storage_account_id, + source_site_id, + source_appliance_name, source_dra, + target_dra, replication_vault, + subscription_id): + """Initialize policy, storage, and AMH solution.""" + setup_replication_policy( + cmd, rg_uri, replication_vault_name, instance_type) + + storage_account_id = setup_storage_and_permissions( + cmd, rg_uri, amh_solution, cache_storage_account_id, + source_site_id, source_appliance_name, migrate_project, project_name, + source_dra, target_dra, replication_vault, subscription_id + ) + + amh_solution_uri = update_amh_solution_storage( + cmd, project_uri, amh_solution, storage_account_id) + + return storage_account_id, amh_solution_uri + + +def execute_replication_infrastructure_setup(cmd, subscription_id, + resource_group_name, + project_name, + source_appliance_name, + target_appliance_name, + cache_storage_account_id, + pass_thru): + """Execute the complete replication infrastructure setup workflow.""" + # Setup project and solutions + (rg_uri, project_uri, migrate_project, amh_solution, + discovery_solution) = setup_project_and_solutions( + cmd, subscription_id, resource_group_name, project_name + ) + + # Get and setup replication vault + (replication_vault, + replication_vault_name) = get_and_setup_replication_vault( + cmd, amh_solution, rg_uri) + + # Setup appliances and determine types + (source_site_id, instance_type, + fabric_instance_type) = setup_appliances_and_types( + discovery_solution, source_appliance_name, target_appliance_name + ) + + # Setup fabrics and DRAs + (source_fabric, target_fabric, source_dra, + target_dra) = setup_fabrics_and_dras( + cmd, rg_uri, resource_group_name, source_appliance_name, + target_appliance_name, project_name, fabric_instance_type, + amh_solution + ) + + # Initialize policy, storage, and AMH solution + (storage_account_id, + amh_solution_uri) = initialize_infrastructure_components( + cmd, rg_uri, project_uri, amh_solution, replication_vault_name, + instance_type, migrate_project, project_name, + cache_storage_account_id, source_site_id, source_appliance_name, + source_dra, target_dra, replication_vault, subscription_id + ) + + # Setup Replication Extension + return setup_replication_extension( + cmd, rg_uri, replication_vault_name, source_fabric, + target_fabric, instance_type, storage_account_id, + amh_solution_uri, pass_thru + ) diff --git a/src/migrate/azext_migrate/helpers/replication/init/_setup_extension.py b/src/migrate/azext_migrate/helpers/replication/init/_setup_extension.py new file mode 100644 index 00000000000..06a14d912c1 --- /dev/null +++ b/src/migrate/azext_migrate/helpers/replication/init/_setup_extension.py 
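For orientation before the new file body: _setup_extension.py (below) derives the replication extension name and its ARM URI from the source and target fabric IDs before any create or delete call. A minimal, self-contained sketch of that naming convention follows; every ID and name here is a made-up placeholder, not a real resource.

    # Illustrative sketch only: mirrors the naming logic used later in
    # setup_replication_extension. Placeholders: <sub>, <rg>, "srcfabric",
    # "tgtfabric", "myvault".
    source_fabric_id = (
        "/subscriptions/<sub>/resourceGroups/<rg>/providers/"
        "Microsoft.DataReplication/replicationFabrics/srcfabric")
    target_fabric_id = (
        "/subscriptions/<sub>/resourceGroups/<rg>/providers/"
        "Microsoft.DataReplication/replicationFabrics/tgtfabric")
    rg_uri = "/subscriptions/<sub>/resourceGroups/<rg>"
    replication_vault_name = "myvault"

    source_short = source_fabric_id.split('/')[-1]   # "srcfabric"
    target_short = target_fabric_id.split('/')[-1]   # "tgtfabric"
    extension_name = f"{source_short}-{target_short}-MigReplicationExtn"
    extension_uri = (
        f"{rg_uri}/providers/Microsoft.DataReplication/"
        f"replicationVaults/{replication_vault_name}/"
        f"replicationExtensions/{extension_name}")
    print(extension_name)   # srcfabric-tgtfabric-MigReplicationExtn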
@@ -0,0 +1,344 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +import time +from knack.util import CLIError +from azext_migrate.helpers._utils import ( + send_get_request, + get_resource_by_id, + delete_resource, + create_or_update_resource, + APIVersion, + ProvisioningState, + AzLocalInstanceTypes, + StorageAccountProvisioningState +) +import json + + +def get_or_check_existing_extension(cmd, extension_uri, + replication_extension_name, + storage_account_id): + """Get existing extension and check if it's in a good state.""" + # Try to get existing extension, handle not found gracefully + try: + replication_extension = get_resource_by_id( + cmd, extension_uri, APIVersion.Microsoft_DataReplication.value + ) + except CLIError as e: + error_str = str(e) + if ("ResourceNotFound" in error_str or "404" in error_str or + "Not Found" in error_str): + # Extension doesn't exist, this is expected for new setups + print( + f"Extension '{replication_extension_name}' does not exist, " + f"will create it." + ) + return None, False + # Some other error occurred, re-raise it + raise + + # Check if extension exists and is in good state + if replication_extension: + existing_state = ( + replication_extension.get('properties', {}) + .get('provisioningState') + ) + existing_storage_id = (replication_extension + .get('properties', {}) + .get('customProperties', {}) + .get('storageAccountId')) + + print( + f"Found existing extension '{replication_extension_name}' in " + f"state: {existing_state}" + ) + + # If it's succeeded with the correct storage account, we're done + if (existing_state == ProvisioningState.Succeeded.value and + existing_storage_id == storage_account_id): + print( + "Replication Extension already exists with correct " + "configuration." + ) + print("Successfully initialized replication infrastructure") + return None, True # Signal that we're done + + # If it's in a bad state or has wrong storage account, delete it + if (existing_state in [ProvisioningState.Failed.value, + ProvisioningState.Canceled.value] or + existing_storage_id != storage_account_id): + print(f"Removing existing extension (state: {existing_state})") + delete_resource( + cmd, extension_uri, APIVersion.Microsoft_DataReplication.value + ) + time.sleep(120) + return None, False + + return replication_extension, False + + +def verify_extension_prerequisites(cmd, rg_uri, replication_vault_name, + instance_type, storage_account_id, + amh_solution_uri, source_fabric_id, + target_fabric_id): + """Verify all prerequisites before creating extension.""" + print("\nVerifying prerequisites before creating extension...") + + # 1. Verify policy is succeeded + policy_name = f"{replication_vault_name}{instance_type}policy" + policy_uri = ( + f"{rg_uri}/providers/Microsoft.DataReplication/replicationVaults" + f"/{replication_vault_name}/replicationPolicies/{policy_name}" + ) + policy_check = get_resource_by_id( + cmd, policy_uri, APIVersion.Microsoft_DataReplication.value) + if (policy_check.get('properties', {}).get('provisioningState') != + ProvisioningState.Succeeded.value): + raise CLIError( + "Policy is not in Succeeded state: {}".format( + policy_check.get('properties', {}).get('provisioningState'))) + + # 2. 
Verify storage account is succeeded + storage_account_name = storage_account_id.split("/")[-1] + storage_uri = ( + f"{rg_uri}/providers/Microsoft.Storage/storageAccounts/" + f"{storage_account_name}") + storage_check = get_resource_by_id( + cmd, storage_uri, APIVersion.Microsoft_Storage.value) + if (storage_check + .get('properties', {}) + .get('provisioningState') != + StorageAccountProvisioningState.Succeeded.value): + raise CLIError( + "Storage account is not in Succeeded state: {}".format( + storage_check.get('properties', {}).get( + 'provisioningState'))) + + # 3. Verify AMH solution has storage account + solution_check = get_resource_by_id( + cmd, amh_solution_uri, APIVersion.Microsoft_Migrate.value) + if (solution_check + .get('properties', {}) + .get('details', {}) + .get('extendedDetails', {}) + .get('replicationStorageAccountId') != storage_account_id): + raise CLIError( + "AMH solution doesn't have the correct storage account ID") + + # 4. Verify fabrics are responsive + source_fabric_check = get_resource_by_id( + cmd, source_fabric_id, APIVersion.Microsoft_DataReplication.value) + if (source_fabric_check.get('properties', {}).get('provisioningState') != + ProvisioningState.Succeeded.value): + raise CLIError("Source fabric is not in Succeeded state") + + target_fabric_check = get_resource_by_id( + cmd, target_fabric_id, APIVersion.Microsoft_DataReplication.value) + if (target_fabric_check.get('properties', {}).get('provisioningState') != + ProvisioningState.Succeeded.value): + raise CLIError("Target fabric is not in Succeeded state") + + print("All prerequisites verified successfully!") + time.sleep(30) + + +def list_existing_extensions(cmd, rg_uri, replication_vault_name): + """List existing extensions for informational purposes.""" + existing_extensions_uri = ( + f"{rg_uri}/providers/Microsoft.DataReplication" + f"/replicationVaults/{replication_vault_name}" + f"/replicationExtensions" + f"?api-version={APIVersion.Microsoft_DataReplication.value}" + ) + try: + existing_extensions_response = send_get_request( + cmd, existing_extensions_uri) + existing_extensions = ( + existing_extensions_response.json().get('value', [])) + if existing_extensions: + print(f"Found {len(existing_extensions)} existing " + f"extension(s): ") + for ext in existing_extensions: + ext_name = ext.get('name') + ext_state = ( + ext.get('properties', {}).get('provisioningState')) + ext_type = (ext.get('properties', {}) + .get('customProperties', {}) + .get('instanceType')) + print(f" - {ext_name}: state={ext_state}, " + f"type={ext_type}") + else: + print("No existing extensions found") + except CLIError as list_error: + # If listing fails, it might mean no extensions exist at all + print(f"Could not list extensions (this is normal for new " + f"projects): {str(list_error)}") + + +def build_extension_body(instance_type, source_fabric_id, + target_fabric_id, storage_account_id): + """Build the extension body based on instance type.""" + print("\n=== Creating extension for replication infrastructure ===") + print(f"Instance Type: {instance_type}") + print(f"Source Fabric ID: {source_fabric_id}") + print(f"Target Fabric ID: {target_fabric_id}") + print(f"Storage Account ID: {storage_account_id}") + + # Build the extension body with properties in the exact order from + # the working API call + if instance_type == AzLocalInstanceTypes.VMwareToAzLocal.value: + # Match exact property order from working call for VMware + extension_body = { + "properties": { + "customProperties": { + "azStackHciFabricArmId": 
target_fabric_id, + "storageAccountId": storage_account_id, + "storageAccountSasSecretName": None, + "instanceType": instance_type, + "vmwareFabricArmId": source_fabric_id + } + } + } + elif instance_type == AzLocalInstanceTypes.HyperVToAzLocal.value: + # For HyperV, use similar order but with hyperVFabricArmId + extension_body = { + "properties": { + "customProperties": { + "azStackHciFabricArmId": target_fabric_id, + "storageAccountId": storage_account_id, + "storageAccountSasSecretName": None, + "instanceType": instance_type, + "hyperVFabricArmId": source_fabric_id + } + } + } + else: + raise CLIError(f"Unsupported instance type: {instance_type}") + + # Debug: Print the exact body being sent + body_str = json.dumps(extension_body, indent=2) + print(f"Extension body being sent: \n{body_str}") + + return extension_body + + +def _wait_for_extension_creation(cmd, extension_uri): + """Wait for extension creation to complete.""" + for i in range(20): + time.sleep(30) + try: + api_version = APIVersion.Microsoft_DataReplication.value + replication_extension = get_resource_by_id( + cmd, extension_uri, api_version) + if replication_extension: + ext_state = replication_extension.get( + 'properties', {}).get('provisioningState') + print(f"Extension state: {ext_state}") + if ext_state in [ProvisioningState.Succeeded.value, + ProvisioningState.Failed.value, + ProvisioningState.Canceled.value]: + break + except CLIError: + print(f"Waiting for extension... ({i + 1}/20)") + + +def _handle_extension_creation_error(cmd, extension_uri, create_error): + """Handle errors during extension creation.""" + error_str = str(create_error) + print(f"Error during extension creation: {error_str}") + + # Check if extension was created despite the error + time.sleep(30) + try: + api_version = APIVersion.Microsoft_DataReplication.value + replication_extension = get_resource_by_id( + cmd, extension_uri, api_version) + if replication_extension: + print( + f"Extension exists despite error, " + f"state: {replication_extension.get('properties', {}).get('provisioningState')}" + ) + except CLIError: + replication_extension = None + + if not replication_extension: + raise CLIError( + f"Failed to create replication extension: " + f"{str(create_error)}") from create_error + + +def create_replication_extension(cmd, extension_uri, extension_body): + """Create the replication extension and wait for it to complete.""" + try: + result = create_or_update_resource( + cmd, extension_uri, + APIVersion.Microsoft_DataReplication.value, + extension_body) + if result: + print("Extension creation initiated successfully") + # Wait for the extension to be created + print("Waiting for extension creation to complete...") + _wait_for_extension_creation(cmd, extension_uri) + except CLIError as create_error: + _handle_extension_creation_error(cmd, extension_uri, create_error) + + +def setup_replication_extension(cmd, rg_uri, replication_vault_name, + source_fabric, target_fabric, + instance_type, storage_account_id, + amh_solution_uri, pass_thru): + """Setup replication extension - main orchestration function.""" + # Setup Replication Extension + source_fabric_id = source_fabric['id'] + target_fabric_id = target_fabric['id'] + source_fabric_short_name = source_fabric_id.split('/')[-1] + target_fabric_short_name = target_fabric_id.split('/')[-1] + replication_extension_name = ( + f"{source_fabric_short_name}-{target_fabric_short_name}-" + f"MigReplicationExtn") + + extension_uri = ( + f"{rg_uri}/providers/Microsoft.DataReplication/" + 
f"replicationVaults/{replication_vault_name}/" + f"replicationExtensions/{replication_extension_name}" + ) + + # Get or check existing extension + replication_extension, is_complete = get_or_check_existing_extension( + cmd, extension_uri, replication_extension_name, + storage_account_id + ) + + if is_complete: + return True if pass_thru else None + + # Verify prerequisites + verify_extension_prerequisites( + cmd, rg_uri, replication_vault_name, instance_type, + storage_account_id, amh_solution_uri, source_fabric_id, + target_fabric_id + ) + + # Create extension if needed + if not replication_extension: + print( + f"Creating Replication Extension " + f"'{replication_extension_name}'...") + + # List existing extensions for context + list_existing_extensions(cmd, rg_uri, replication_vault_name) + + # Build extension body + extension_body = build_extension_body( + instance_type, source_fabric_id, target_fabric_id, + storage_account_id + ) + + # Create the extension + create_replication_extension(cmd, extension_uri, extension_body) + + print("Successfully initialized replication infrastructure") + return True if pass_thru else None diff --git a/src/migrate/azext_migrate/helpers/replication/init/_setup_permissions.py b/src/migrate/azext_migrate/helpers/replication/init/_setup_permissions.py new file mode 100644 index 00000000000..1a4c69cb30e --- /dev/null +++ b/src/migrate/azext_migrate/helpers/replication/init/_setup_permissions.py @@ -0,0 +1,239 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +import time +from knack.util import CLIError +from azext_migrate.helpers._utils import ( + create_or_update_resource, + APIVersion, + RoleDefinitionIds +) + + +def _get_role_name(role_def_id): + """Get role name from role definition ID.""" + return ("Contributor" if role_def_id == RoleDefinitionIds.ContributorId + else "Storage Blob Data Contributor") + + +def _assign_role_to_principal(auth_client, storage_account_id, + subscription_id, + principal_id, role_def_id, + principal_type_name): + """Assign a role to a principal if not already assigned.""" + from uuid import uuid4 + from azure.mgmt.authorization.models import ( + RoleAssignmentCreateParameters, PrincipalType + ) + + role_name = _get_role_name(role_def_id) + + # Check if assignment exists + assignments = auth_client.role_assignments.list_for_scope( + scope=storage_account_id, + filter=f"principalId eq '{principal_id}'" + ) + + roles = [a.role_definition_id.endswith(role_def_id) for a in assignments] + has_role = any(roles) + + if not has_role: + role_assignment_params = RoleAssignmentCreateParameters( + role_definition_id=( + f"/subscriptions/{subscription_id}/providers" + f"/Microsoft.Authorization/roleDefinitions/{role_def_id}" + ), + principal_id=principal_id, + principal_type=PrincipalType.SERVICE_PRINCIPAL + ) + auth_client.role_assignments.create( + scope=storage_account_id, + role_assignment_name=str(uuid4()), + parameters=role_assignment_params + ) + print( + f" ✓ Created {role_name} role for {principal_type_name} " + f"{principal_id[:8]}..." 
+ ) + return f"{principal_id[:8]} - {role_name}", False + print( + f" ✓ {role_name} role already exists for {principal_type_name} " + f"{principal_id[:8]}" + ) + return f"{principal_id[:8]} - {role_name} (existing)", True + + +def _verify_role_assignments(auth_client, storage_account_id, + expected_principal_ids): + """Verify that role assignments were created successfully.""" + print("Verifying role assignments...") + all_assignments = list( + auth_client.role_assignments.list_for_scope( + scope=storage_account_id + ) + ) + verified_principals = set() + + for assignment in all_assignments: + principal_id = assignment.principal_id + if principal_id in expected_principal_ids: + verified_principals.add(principal_id) + role_id = assignment.role_definition_id.split('/')[-1] + role_display = _get_role_name(role_id) + print( + f" ✓ Verified {role_display} for principal " + f"{principal_id[:8]}" + ) + + missing_principals = set(expected_principal_ids) - verified_principals + if missing_principals: + print( + f"WARNING: {len(missing_principals)} principal(s) missing role " + f"assignments: " + ) + for principal in missing_principals: + print(f" - {principal}") + + +def grant_storage_permissions(cmd, storage_account_id, source_dra, + target_dra, replication_vault, subscription_id): + """Grant role assignments for DRAs and vault identity to storage acct.""" + from azure.mgmt.authorization import AuthorizationManagementClient + + # Get role assignment client + from azure.cli.core.commands.client_factory import ( + get_mgmt_service_client + ) + auth_client = get_mgmt_service_client( + cmd.cli_ctx, AuthorizationManagementClient + ) + + source_dra_object_id = ( + source_dra.get('properties', {}) + .get('resourceAccessIdentity', {}).get('objectId') + ) + target_dra_object_id = ( + target_dra.get('properties', {}) + .get('resourceAccessIdentity', {}).get('objectId') + ) + + # Get vault identity from either root level or properties level + vault_identity = ( + replication_vault.get('identity') or + replication_vault.get('properties', {}).get('identity') + ) + vault_identity_id = ( + vault_identity.get('principalId') if vault_identity else None + ) + + print("Granting permissions to the storage account...") + print(f" Source DRA Principal ID: {source_dra_object_id}") + print(f" Target DRA Principal ID: {target_dra_object_id}") + print(f" Vault Identity Principal ID: {vault_identity_id}") + + successful_assignments = [] + failed_assignments = [] + + # Create role assignments for source and target DRAs + for object_id in [source_dra_object_id, target_dra_object_id]: + if object_id: + for role_def_id in [ + RoleDefinitionIds.ContributorId, + RoleDefinitionIds.StorageBlobDataContributorId + ]: + try: + assignment_msg, _ = _assign_role_to_principal( + auth_client, storage_account_id, subscription_id, + object_id, role_def_id, "DRA" + ) + successful_assignments.append(assignment_msg) + except CLIError as e: + role_name = _get_role_name(role_def_id) + error_msg = f"{object_id[:8]} - {role_name}: {str(e)}" + failed_assignments.append(error_msg) + + # Grant vault identity permissions if exists + if vault_identity_id: + for role_def_id in [RoleDefinitionIds.ContributorId, + RoleDefinitionIds.StorageBlobDataContributorId]: + try: + assignment_msg, _ = _assign_role_to_principal( + auth_client, storage_account_id, subscription_id, + vault_identity_id, role_def_id, "vault" + ) + successful_assignments.append(assignment_msg) + except CLIError as e: + role_name = _get_role_name(role_def_id) + error_msg = 
f"{vault_identity_id[:8]} - {role_name}: {str(e)}" + failed_assignments.append(error_msg) + + # Report role assignment status + print("\nRole Assignment Summary:") + print(f" Successful: {len(successful_assignments)}") + if failed_assignments: + print(f" Failed: {len(failed_assignments)}") + for failure in failed_assignments: + print(f" - {failure}") + + # If there are failures, raise an error + if failed_assignments: + raise CLIError( + f"Failed to create {len(failed_assignments)} role " + f"assignment(s). " + "The storage account may not have proper permissions." + ) + + # Add a wait after role assignments to ensure propagation + time.sleep(120) + + # Verify role assignments were successful + expected_principal_ids = [ + source_dra_object_id, target_dra_object_id, vault_identity_id + ] + _verify_role_assignments( + auth_client, storage_account_id, expected_principal_ids + ) + + +def update_amh_solution_storage(cmd, + project_uri, + amh_solution, + storage_account_id): + """Update AMH solution with storage account ID if needed.""" + amh_solution_uri = ( + f"{project_uri}/solutions/" + f"Servers-Migration-ServerMigration_DataReplication" + ) + + if (amh_solution + .get('properties', {}) + .get('details', {}) + .get('extendedDetails', {}) + .get('replicationStorageAccountId')) != storage_account_id: + extended_details = (amh_solution + .get('properties', {}) + .get('details', {}) + .get('extendedDetails', {})) + extended_details['replicationStorageAccountId'] = ( + storage_account_id + ) + + solution_body = { + "properties": { + "details": { + "extendedDetails": extended_details + } + } + } + + create_or_update_resource( + cmd, amh_solution_uri, APIVersion.Microsoft_Migrate.value, + solution_body + ) + + # Wait for the AMH solution update to fully propagate + time.sleep(60) + + return amh_solution_uri diff --git a/src/migrate/azext_migrate/helpers/replication/init/_setup_policy.py b/src/migrate/azext_migrate/helpers/replication/init/_setup_policy.py new file mode 100644 index 00000000000..eab0993b9e1 --- /dev/null +++ b/src/migrate/azext_migrate/helpers/replication/init/_setup_policy.py @@ -0,0 +1,555 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- + +import time +from knack.util import CLIError +from knack.log import get_logger +from azext_migrate.helpers._utils import ( + send_get_request, + get_resource_by_id, + delete_resource, + create_or_update_resource, + generate_hash_for_artifact, + APIVersion, + ProvisioningState, + AzLocalInstanceTypes, + FabricInstanceTypes, + ReplicationPolicyDetails, + StorageAccountProvisioningState +) + + +def determine_instance_types(source_site_id, target_site_id, + source_appliance_name, + target_appliance_name): + """Determine instance types based on site IDs.""" + hyperv_site_pattern = "/Microsoft.OffAzure/HyperVSites/" + vmware_site_pattern = "/Microsoft.OffAzure/VMwareSites/" + + if (hyperv_site_pattern in source_site_id and + hyperv_site_pattern in target_site_id): + instance_type = AzLocalInstanceTypes.HyperVToAzLocal.value + fabric_instance_type = FabricInstanceTypes.HyperVInstance.value + elif (vmware_site_pattern in source_site_id and + hyperv_site_pattern in target_site_id): + instance_type = AzLocalInstanceTypes.VMwareToAzLocal.value + fabric_instance_type = FabricInstanceTypes.VMwareInstance.value + else: + src_type = ( + 'VMware' if vmware_site_pattern in source_site_id + else 'HyperV' if hyperv_site_pattern in source_site_id + else 'Unknown' + ) + tgt_type = ( + 'VMware' if vmware_site_pattern in target_site_id + else 'HyperV' if hyperv_site_pattern in target_site_id + else 'Unknown' + ) + raise CLIError( + f"Error matching source '{source_appliance_name}' and target " + f"'{target_appliance_name}' appliances. Source is {src_type}, " + f"Target is {tgt_type}" + ) + + return instance_type, fabric_instance_type + + +def find_fabric(all_fabrics, appliance_name, fabric_instance_type, + amh_solution, is_source=True): + """Find and validate a fabric for the given appliance.""" + logger = get_logger(__name__) + fabric = None + fabric_candidates = [] + + for candidate in all_fabrics: + props = candidate.get('properties', {}) + custom_props = props.get('customProperties', {}) + fabric_name = candidate.get('name', '') + + # Check if this fabric matches our criteria + is_succeeded = (props.get('provisioningState') == + ProvisioningState.Succeeded.value) + + # Check solution ID match - handle case differences and trailing + # slashes + fabric_solution_id = (custom_props.get('migrationSolutionId', '') + .rstrip('/')) + expected_solution_id = amh_solution.get('id', '').rstrip('/') + is_correct_solution = (fabric_solution_id.lower() == + expected_solution_id.lower()) + + is_correct_instance = (custom_props.get('instanceType') == + fabric_instance_type) + + # Check if fabric name contains appliance name or vice versa + name_matches = ( + fabric_name.lower().startswith(appliance_name.lower()) or + appliance_name.lower() in fabric_name.lower() or + fabric_name.lower() in appliance_name.lower() or + f"{appliance_name.lower()}-" in fabric_name.lower() + ) + + # Collect potential candidates even if they don't fully match + if custom_props.get('instanceType') == fabric_instance_type: + fabric_candidates.append({ + 'name': fabric_name, + 'state': props.get('provisioningState'), + 'solution_match': is_correct_solution, + 'name_match': name_matches + }) + + if is_succeeded and is_correct_instance and name_matches: + # If solution doesn't match, log warning but still consider it + if not is_correct_solution: + logger.warning( + "Fabric '%s' matches name and type but has " + "different solution ID", fabric_name) + 
fabric = candidate + break + + if not fabric: + appliance_type_label = "source" if is_source else "target" + error_msg = ( + f"Couldn't find connected {appliance_type_label} appliance " + f"'{appliance_name}'.\n") + + if fabric_candidates: + error_msg += ( + f"Found {len(fabric_candidates)} fabric(s) with " + f"matching type '{fabric_instance_type}': \n") + for candidate in fabric_candidates: + error_msg += ( + f" - {candidate['name']} " + f"(state: {candidate['state']}, " + f"solution_match: {candidate['solution_match']}, " + f"name_match: {candidate['name_match']})\n") + error_msg += "\nPlease verify:\n" + error_msg += "1. The appliance name matches exactly\n" + error_msg += "2. The fabric is in 'Succeeded' state\n" + error_msg += ( + "3. The fabric belongs to the correct migration solution") + else: + error_msg += ( + f"No fabrics found with instance type " + f"'{fabric_instance_type}'.\n") + error_msg += "\nThis usually means:\n" + error_msg += ( + f"1. The {appliance_type_label} appliance " + f"'{appliance_name}' is not properly configured\n") + if (fabric_instance_type == + FabricInstanceTypes.VMwareInstance.value): + appliance_type = 'VMware' + elif (fabric_instance_type == + FabricInstanceTypes.HyperVInstance.value): + appliance_type = 'HyperV' + else: + appliance_type = 'Azure Local' + error_msg += ( + f"2. The appliance type doesn't match " + f"(expecting {appliance_type})\n") + error_msg += ( + "3. The fabric creation is still in progress - " + "wait a few minutes and retry") + + if all_fabrics: + error_msg += "\n\nAvailable fabrics in resource group:\n" + for fab in all_fabrics: + props = fab.get('properties', {}) + custom_props = props.get('customProperties', {}) + error_msg += ( + f" - {fab.get('name')} " + f"(type: {custom_props.get('instanceType')})\n") + + raise CLIError(error_msg) + + return fabric + + +def get_fabric_agent(cmd, replication_fabrics_uri, fabric, appliance_name, + fabric_instance_type): + """Get and validate fabric agent (DRA) for the given fabric.""" + fabric_name = fabric.get('name') + dras_uri = ( + f"{replication_fabrics_uri}/{fabric_name}" + f"/fabricAgents?api-version=" + f"{APIVersion.Microsoft_DataReplication.value}" + ) + dras_response = send_get_request(cmd, dras_uri) + dras = dras_response.json().get('value', []) + + dra = None + for candidate in dras: + props = candidate.get('properties', {}) + custom_props = props.get('customProperties', {}) + if (props.get('machineName') == appliance_name and + custom_props.get('instanceType') == fabric_instance_type and + bool(props.get('isResponsive'))): + dra = candidate + break + + if not dra: + raise CLIError( + f"The appliance '{appliance_name}' is in a disconnected state." 
+ ) + + return dra + + +def setup_replication_policy(cmd, + rg_uri, + replication_vault_name, + instance_type): + """Setup or validate replication policy.""" + policy_name = f"{replication_vault_name}{instance_type}policy" + policy_uri = ( + f"{rg_uri}/providers/Microsoft.DataReplication/replicationVaults" + f"/{replication_vault_name}/replicationPolicies/{policy_name}" + ) + + # Try to get existing policy, handle not found gracefully + try: + policy = get_resource_by_id( + cmd, policy_uri, APIVersion.Microsoft_DataReplication.value + ) + except CLIError as e: + error_str = str(e) + if ("ResourceNotFound" in error_str or "404" in error_str or + "Not Found" in error_str): + # Policy doesn't exist, this is expected for new setups + print(f"Policy '{policy_name}' does not exist, will create it.") + policy = None + else: + # Some other error occurred, re-raise it + raise + + # Handle existing policy states + if policy: + provisioning_state = ( + policy + .get('properties', {}) + .get('provisioningState') + ) + + # Wait for creating/updating to complete + if provisioning_state in [ProvisioningState.Creating.value, + ProvisioningState.Updating.value]: + print( + f"Policy '{policy_name}' found in Provisioning State " + f"'{provisioning_state}'." + ) + for i in range(20): + time.sleep(30) + policy = get_resource_by_id( + cmd, policy_uri, + APIVersion.Microsoft_DataReplication.value + ) + if policy: + provisioning_state = ( + policy.get('properties', {}).get('provisioningState') + ) + if provisioning_state not in [ + ProvisioningState.Creating.value, + ProvisioningState.Updating.value]: + break + + # Remove policy if in bad state + if provisioning_state in [ProvisioningState.Canceled.value, + ProvisioningState.Failed.value]: + print( + f"Policy '{policy_name}' found in unusable state " + f"'{provisioning_state}'. Removing..." + ) + delete_resource( + cmd, policy_uri, APIVersion.Microsoft_DataReplication.value + ) + time.sleep(30) + policy = None + + # Create policy if needed + if not policy or ( + policy and + policy.get('properties', {}).get('provisioningState') == + ProvisioningState.Deleted.value): + print(f"Creating Policy '{policy_name}'...") + + recoveryPoint = ( + ReplicationPolicyDetails.RecoveryPointHistoryInMinutes + ) + crashConsistentFreq = ( + ReplicationPolicyDetails.CrashConsistentFrequencyInMinutes + ) + appConsistentFreq = ( + ReplicationPolicyDetails.AppConsistentFrequencyInMinutes + ) + + policy_body = { + "properties": { + "customProperties": { + "instanceType": instance_type, + "recoveryPointHistoryInMinutes": recoveryPoint, + "crashConsistentFrequencyInMinutes": crashConsistentFreq, + "appConsistentFrequencyInMinutes": appConsistentFreq + } + } + } + + create_or_update_resource( + cmd, + policy_uri, + APIVersion.Microsoft_DataReplication.value, + policy_body, + ) + + # Wait for policy creation + for i in range(20): + time.sleep(30) + try: + policy = get_resource_by_id( + cmd, policy_uri, + APIVersion.Microsoft_DataReplication.value + ) + except Exception as poll_error: + # During creation, it might still return 404 initially + if ("ResourceNotFound" in str(poll_error) or + "404" in str(poll_error)): + print(f"Policy creation in progress... 
({i + 1}/20)") + continue + raise + + if policy: + provisioning_state = ( + policy.get('properties', {}).get('provisioningState') + ) + print(f"Policy state: {provisioning_state}") + if provisioning_state in [ + ProvisioningState.Succeeded.value, + ProvisioningState.Failed.value, + ProvisioningState.Canceled.value, + ProvisioningState.Deleted.value]: + break + + if not policy or ( + policy.get('properties', {}).get('provisioningState') != + ProvisioningState.Succeeded.value): + raise CLIError(f"Policy '{policy_name}' is not in Succeeded state.") + + return policy + + +def setup_cache_storage_account(cmd, rg_uri, amh_solution, + cache_storage_account_id, + source_site_id, source_appliance_name, + migrate_project, project_name): + """Setup or validate cache storage account.""" + logger = get_logger(__name__) + + amh_stored_storage_account_id = ( + amh_solution.get('properties', {}) + .get('details', {}) + .get('extendedDetails', {}) + .get('replicationStorageAccountId') + ) + cache_storage_account = None + + if amh_stored_storage_account_id: + # Check existing storage account + storage_account_name = amh_stored_storage_account_id.split("/")[8] + storage_uri = ( + f"{rg_uri}/providers/Microsoft.Storage/storageAccounts" + f"/{storage_account_name}" + ) + storage_account = get_resource_by_id( + cmd, storage_uri, APIVersion.Microsoft_Storage.value + ) + + if storage_account and ( + storage_account + .get('properties', {}) + .get('provisioningState') == + StorageAccountProvisioningState.Succeeded.value + ): + cache_storage_account = storage_account + if (cache_storage_account_id and + cache_storage_account['id'] != + cache_storage_account_id): + warning_msg = ( + f"A Cache Storage Account '{storage_account_name}' is " + f"already linked. " + ) + warning_msg += "Ignoring provided -cache_storage_account_id." + logger.warning(warning_msg) + + # Use user-provided storage account if no existing one + if not cache_storage_account and cache_storage_account_id: + storage_account_name = cache_storage_account_id.split("/")[8].lower() + storage_uri = ( + f"{rg_uri}/providers/Microsoft.Storage/storageAccounts/" + f"{storage_account_name}" + ) + user_storage_account = get_resource_by_id( + cmd, storage_uri, APIVersion.Microsoft_Storage.value + ) + + if user_storage_account and ( + user_storage_account + .get('properties', {}) + .get('provisioningState') == + StorageAccountProvisioningState.Succeeded.value + ): + cache_storage_account = user_storage_account + else: + error_msg = ( + f"Cache Storage Account with Id " + f"'{cache_storage_account_id}' not found " + ) + error_msg += "or not in valid state." 
+ raise CLIError(error_msg) + + # Create new storage account if needed + if not cache_storage_account: + artifact = f"{source_site_id}/{source_appliance_name}" + suffix_hash = generate_hash_for_artifact(artifact) + if len(suffix_hash) > 14: + suffix_hash = suffix_hash[:14] + storage_account_name = f"migratersa{suffix_hash}" + + print(f"Creating Cache Storage Account '{storage_account_name}'...") + + storage_body = { + "location": migrate_project.get('location'), + "tags": {"Migrate Project": project_name}, + "sku": {"name": "Standard_LRS"}, + "kind": "StorageV2", + "properties": { + "allowBlobPublicAccess": False, + "allowCrossTenantReplication": True, + "minimumTlsVersion": "TLS1_2", + "networkAcls": { + "defaultAction": "Allow" + }, + "encryption": { + "services": { + "blob": {"enabled": True}, + "file": {"enabled": True} + }, + "keySource": "Microsoft.Storage" + }, + "accessTier": "Hot" + } + } + + storage_uri = ( + f"{rg_uri}/providers/Microsoft.Storage/storageAccounts" + f"/{storage_account_name}" + ) + cache_storage_account = create_or_update_resource( + cmd, + storage_uri, + APIVersion.Microsoft_Storage.value, + storage_body + ) + + for _ in range(20): + time.sleep(30) + cache_storage_account = get_resource_by_id( + cmd, + storage_uri, + APIVersion.Microsoft_Storage.value + ) + if cache_storage_account and ( + cache_storage_account + .get('properties', {}) + .get('provisioningState') == + StorageAccountProvisioningState.Succeeded.value + ): + break + + if not cache_storage_account or ( + cache_storage_account + .get('properties', {}) + .get('provisioningState') != + StorageAccountProvisioningState.Succeeded.value + ): + raise CLIError("Failed to setup Cache Storage Account.") + + return cache_storage_account + + +def verify_storage_account_network_settings(cmd, + rg_uri, + cache_storage_account): + """Verify and update storage account network settings if needed.""" + storage_account_id = cache_storage_account['id'] + + # Verify storage account network settings + print("Verifying storage account network configuration...") + network_acls = ( + cache_storage_account.get('properties', {}).get('networkAcls', {}) + ) + default_action = network_acls.get('defaultAction', 'Allow') + + if default_action != 'Allow': + print( + f"WARNING: Storage account network defaultAction is " + f"'{default_action}'. " + "This may cause permission issues." + ) + print( + "Updating storage account to allow public network access..." 
+ ) + + # Update storage account to allow public access + storage_account_name = storage_account_id.split("/")[-1] + storage_uri = ( + f"{rg_uri}/providers/Microsoft.Storage/storageAccounts/" + f"{storage_account_name}" + ) + + update_body = { + "properties": { + "networkAcls": { + "defaultAction": "Allow" + } + } + } + + create_or_update_resource( + cmd, storage_uri, APIVersion.Microsoft_Storage.value, + update_body + ) + + # Wait for network update to propagate + time.sleep(30) + + +def get_all_fabrics(cmd, rg_uri, resource_group_name, + source_appliance_name, + target_appliance_name, project_name): + """Get all replication fabrics in the resource group.""" + replication_fabrics_uri = ( + f"{rg_uri}/providers/Microsoft.DataReplication/replicationFabrics" + ) + fabrics_uri = ( + f"{replication_fabrics_uri}?api-version=" + f"{APIVersion.Microsoft_DataReplication.value}" + ) + fabrics_response = send_get_request(cmd, fabrics_uri) + all_fabrics = fabrics_response.json().get('value', []) + + # If no fabrics exist at all, provide helpful message + if not all_fabrics: + raise CLIError( + f"No replication fabrics found in resource group " + f"'{resource_group_name}'. " + f"Please ensure that: \n" + f"1. The source appliance '{source_appliance_name}' is deployed " + f"and connected\n" + f"2. The target appliance '{target_appliance_name}' is deployed " + f"and connected\n" + f"3. Both appliances are registered with the Azure Migrate " + f"project '{project_name}'" + ) + + return all_fabrics, replication_fabrics_uri diff --git a/src/migrate/azext_migrate/helpers/replication/init/_validate.py b/src/migrate/azext_migrate/helpers/replication/init/_validate.py new file mode 100644 index 00000000000..d81a2418906 --- /dev/null +++ b/src/migrate/azext_migrate/helpers/replication/init/_validate.py @@ -0,0 +1,294 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- + +import time +from knack.util import CLIError +from knack.log import get_logger +from azext_migrate.helpers._utils import ( + get_resource_by_id, + create_or_update_resource, + APIVersion, + ProvisioningState +) +import json + + +def validate_required_parameters(resource_group_name, + project_name, + source_appliance_name, + target_appliance_name): + # Validate required parameters + if not resource_group_name: + raise CLIError("resource_group_name is required.") + if not project_name: + raise CLIError("project_name is required.") + if not source_appliance_name: + raise CLIError("source_appliance_name is required.") + if not target_appliance_name: + raise CLIError("target_appliance_name is required.") + + +def get_and_validate_resource_group(cmd, subscription_id, + resource_group_name): + """Get and validate that the resource group exists.""" + rg_uri = (f"/subscriptions/{subscription_id}/" + f"resourceGroups/{resource_group_name}") + resource_group = get_resource_by_id( + cmd, rg_uri, APIVersion.Microsoft_Resources.value) + if not resource_group: + raise CLIError( + f"Resource group '{resource_group_name}' does not exist " + f"in the subscription.") + print(f"Selected Resource Group: '{resource_group_name}'") + return rg_uri + + +def get_migrate_project(cmd, project_uri, project_name): + """Get and validate migrate project.""" + migrate_project = get_resource_by_id( + cmd, project_uri, APIVersion.Microsoft_Migrate.value) + if not migrate_project: + raise CLIError(f"Migrate project '{project_name}' not found.") + + if (migrate_project.get('properties', {}).get('provisioningState') != + ProvisioningState.Succeeded.value): + raise CLIError( + f"Migrate project '{project_name}' is not in a valid state.") + + return migrate_project + + +def get_data_replication_solution(cmd, project_uri): + """Get Data Replication Service Solution.""" + amh_solution_name = ( + "Servers-Migration-ServerMigration_DataReplication") + amh_solution_uri = f"{project_uri}/solutions/{amh_solution_name}" + amh_solution = get_resource_by_id( + cmd, amh_solution_uri, APIVersion.Microsoft_Migrate.value) + if not amh_solution: + raise CLIError( + f"No Data Replication Service Solution " + f"'{amh_solution_name}' found.") + return amh_solution + + +def get_discovery_solution(cmd, project_uri): + """Get Discovery Solution.""" + discovery_solution_name = "Servers-Discovery-ServerDiscovery" + discovery_solution_uri = ( + f"{project_uri}/solutions/{discovery_solution_name}") + discovery_solution = get_resource_by_id( + cmd, discovery_solution_uri, APIVersion.Microsoft_Migrate.value) + if not discovery_solution: + raise CLIError( + f"Server Discovery Solution '{discovery_solution_name}' " + f"not found.") + return discovery_solution + + +def get_and_setup_replication_vault(cmd, amh_solution, rg_uri): + """Get and setup replication vault with managed identity.""" + # Validate Replication Vault + vault_id = (amh_solution.get('properties', {}) + .get('details', {}) + .get('extendedDetails', {}) + .get('vaultId')) + if not vault_id: + raise CLIError( + "No Replication Vault found. 
Please verify your " + "Azure Migrate project setup.") + + replication_vault_name = vault_id.split("/")[8] + vault_uri = ( + f"{rg_uri}/providers/Microsoft.DataReplication/" + f"replicationVaults/{replication_vault_name}") + replication_vault = get_resource_by_id( + cmd, vault_uri, APIVersion.Microsoft_DataReplication.value) + if not replication_vault: + raise CLIError( + f"No Replication Vault '{replication_vault_name}' found.") + + # Check if vault has managed identity, if not, enable it + vault_identity = ( + replication_vault.get('identity') or + replication_vault.get('properties', {}).get('identity') + ) + if not vault_identity or not vault_identity.get('principalId'): + print( + f"Replication vault '{replication_vault_name}' does not " + f"have a managed identity. " + "Enabling system-assigned identity..." + ) + + # Update vault to enable system-assigned managed identity + vault_update_body = { + "identity": { + "type": "SystemAssigned" + } + } + + replication_vault = create_or_update_resource( + cmd, vault_uri, APIVersion.Microsoft_DataReplication.value, + vault_update_body + ) + + # Wait for identity to be created + time.sleep(30) + + # Refresh vault to get the identity + replication_vault = get_resource_by_id( + cmd, vault_uri, APIVersion.Microsoft_DataReplication.value) + vault_identity = ( + replication_vault.get('identity') or + replication_vault.get('properties', {}).get('identity') + ) + + if not vault_identity or not vault_identity.get('principalId'): + raise CLIError( + f"Failed to enable managed identity for replication " + f"vault '{replication_vault_name}'") + + print( + f"✓ Enabled system-assigned managed identity. " + f"Principal ID: {vault_identity.get('principalId')}" + ) + else: + print( + f"✓ Replication vault has managed identity. 
" + f"Principal ID: {vault_identity.get('principalId')}") + + return replication_vault, replication_vault_name + + +def _store_appliance_site_mapping(app_map, appliance_name, site_id): + """Store appliance name to site ID mapping in both lowercase and + original case.""" + app_map[appliance_name.lower()] = site_id + app_map[appliance_name] = site_id + + +def _process_v3_dict_map(app_map, app_map_v3): + """Process V3 appliance map in dict format.""" + for appliance_name_key, site_info in app_map_v3.items(): + if isinstance(site_info, dict) and 'SiteId' in site_info: + _store_appliance_site_mapping( + app_map, appliance_name_key, site_info['SiteId']) + elif isinstance(site_info, str): + _store_appliance_site_mapping( + app_map, appliance_name_key, site_info) + + +def _process_v3_list_item(app_map, item): + """Process a single item from V3 appliance list.""" + if not isinstance(item, dict): + return + + # Check if it has ApplianceName/SiteId structure + if 'ApplianceName' in item and 'SiteId' in item: + _store_appliance_site_mapping( + app_map, item['ApplianceName'], item['SiteId']) + return + + # Or it might be a single key-value pair + for key, value in item.items(): + if isinstance(value, dict) and 'SiteId' in value: + _store_appliance_site_mapping( + app_map, key, value['SiteId']) + elif isinstance(value, str): + _store_appliance_site_mapping(app_map, key, value) + + +def _process_v3_appliance_map(app_map, app_map_v3): + """Process V3 appliance map data structure.""" + if isinstance(app_map_v3, dict): + _process_v3_dict_map(app_map, app_map_v3) + elif isinstance(app_map_v3, list): + for item in app_map_v3: + _process_v3_list_item(app_map, item) + + +def parse_appliance_mappings(discovery_solution): + """Parse appliance name to site ID mappings from discovery solution.""" + app_map = {} + extended_details = (discovery_solution.get('properties', {}) + .get('details', {}) + .get('extendedDetails', {})) + + # Process applianceNameToSiteIdMapV2 + if 'applianceNameToSiteIdMapV2' in extended_details: + try: + app_map_v2 = json.loads( + extended_details['applianceNameToSiteIdMapV2']) + if isinstance(app_map_v2, list): + for item in app_map_v2: + if (isinstance(item, dict) and + 'ApplianceName' in item and + 'SiteId' in item): + # Store both lowercase and original case + app_map[item['ApplianceName'].lower()] = ( + item['SiteId']) + app_map[item['ApplianceName']] = item['SiteId'] + except (json.JSONDecodeError, KeyError, TypeError) as e: + get_logger(__name__).warning( + "Failed to parse applianceNameToSiteIdMapV2: %s", str(e)) + + # Process applianceNameToSiteIdMapV3 + if 'applianceNameToSiteIdMapV3' in extended_details: + try: + app_map_v3 = json.loads( + extended_details['applianceNameToSiteIdMapV3']) + _process_v3_appliance_map(app_map, app_map_v3) + except (json.JSONDecodeError, KeyError, TypeError) as e: + get_logger(__name__).warning( + "Failed to parse applianceNameToSiteIdMapV3: %s", str(e)) + + if not app_map: + raise CLIError( + "Server Discovery Solution missing Appliance Details. 
" + "Invalid Solution.") + + return app_map + + +def validate_and_get_site_ids(app_map, source_appliance_name, + target_appliance_name): + """Validate appliance names and get their site IDs.""" + # Validate SourceApplianceName & TargetApplianceName - try both + # original and lowercase + source_site_id = (app_map.get(source_appliance_name) or + app_map.get(source_appliance_name.lower())) + target_site_id = (app_map.get(target_appliance_name) or + app_map.get(target_appliance_name.lower())) + + if not source_site_id: + # Provide helpful error message with available appliances + # (filter out duplicates) + available_appliances = list(set(k for k in app_map + if k not in app_map or + not k.islower())) + if not available_appliances: + # If all keys are lowercase, show them + available_appliances = list(set(app_map.keys())) + raise CLIError( + f"Source appliance '{source_appliance_name}' not in " + f"discovery solution. " + f"Available appliances: {','.join(available_appliances)}" + ) + if not target_site_id: + # Provide helpful error message with available appliances + # (filter out duplicates) + available_appliances = list(set(k for k in app_map + if k not in app_map or + not k.islower())) + if not available_appliances: + # If all keys are lowercase, show them + available_appliances = list(set(app_map.keys())) + raise CLIError( + f"Target appliance '{target_appliance_name}' not in " + f"discovery solution. " + f"Available appliances: {','.join(available_appliances)}" + ) + + return source_site_id, target_site_id diff --git a/src/migrate/azext_migrate/helpers/replication/job/_format.py b/src/migrate/azext_migrate/helpers/replication/job/_format.py new file mode 100644 index 00000000000..49dec6bc115 --- /dev/null +++ b/src/migrate/azext_migrate/helpers/replication/job/_format.py @@ -0,0 +1,133 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +""" +Job formatting utilities for Azure Migrate local replication jobs. +""" + + +def calculate_duration(start_time, end_time): + """ + Calculate duration between two timestamps. 
+
+    Args:
+        start_time (str): ISO format start time
+        end_time (str, optional): ISO format end time
+
+    Returns:
+        str: Formatted duration string or None
+    """
+    if not start_time:
+        return None
+
+    from datetime import datetime, timezone
+    try:
+        start = datetime.fromisoformat(start_time.replace('Z', '+00:00'))
+        if end_time:
+            end = datetime.fromisoformat(end_time.replace('Z', '+00:00'))
+            duration = end - start
+            total_seconds = int(duration.total_seconds())
+            minutes, seconds = divmod(total_seconds, 60)
+            hours, minutes = divmod(minutes, 60)
+
+            if hours > 0:
+                return f"{hours}h {minutes}m {seconds}s"
+            elif minutes > 0:
+                return f"{minutes}m {seconds}s"
+            else:
+                return f"{seconds}s"
+        else:
+            # Job still running; use an offset-aware "now" so it can be
+            # subtracted from the offset-aware parsed start time
+            now = datetime.now(timezone.utc)
+            duration = now - start
+            total_seconds = int(duration.total_seconds())
+            minutes, seconds = divmod(total_seconds, 60)
+            hours, minutes = divmod(minutes, 60)
+
+            if hours > 0:
+                return f"{hours}h {minutes}m (in progress)"
+            elif minutes > 0:
+                return f"{minutes}m {seconds}s (in progress)"
+            else:
+                return f"{seconds}s (in progress)"
+    except Exception:
+        return None
+
+
+def format_job_output(job_details):
+    """
+    Format job details into a clean, user-friendly output.
+
+    Args:
+        job_details (dict): Raw job details from the API
+
+    Returns:
+        dict: Formatted job information
+    """
+    props = job_details.get('properties', {})
+
+    # Extract key information
+    formatted = {
+        'jobName': job_details.get('name'),
+        'displayName': props.get('displayName'),
+        'state': props.get('state'),
+        'vmName': props.get('objectInternalName'),
+        'startTime': props.get('startTime'),
+        'endTime': props.get('endTime'),
+        'duration': calculate_duration(
+            props.get('startTime'),
+            props.get('endTime'))}
+
+    # Add error information if present
+    errors = props.get('errors', [])
+    if errors:
+        formatted['errors'] = [
+            {
+                'message': err.get('message'),
+                'code': err.get('code'),
+                'recommendation': err.get('recommendation')
+            }
+            for err in errors
+        ]
+
+    # Add task progress
+    tasks = props.get('tasks', [])
+    if tasks:
+        formatted['tasks'] = [
+            {
+                'name': task.get('taskName'),
+                'state': task.get('state'),
+                'duration': calculate_duration(task.get('startTime'), task.get('endTime'))
+            }
+            for task in tasks
+        ]
+
+    return formatted
+
+
+def format_job_summary(job_details):
+    """
+    Format job details into a summary for list output.
+
+    Args:
+        job_details (dict): Raw job details from the API
+
+    Returns:
+        dict: Formatted job summary
+    """
+    props = job_details.get('properties', {})
+    errors = props.get('errors') or []
+
+    return {
+        'jobName': job_details.get('name'),
+        'displayName': props.get('displayName'),
+        'state': props.get('state'),
+        'vmName': props.get('objectInternalName'),
+        'startTime': props.get('startTime'),
+        'endTime': props.get('endTime'),
+        'duration': calculate_duration(
+            props.get('startTime'),
+            props.get('endTime')),
+        'hasErrors': len(errors) > 0}
diff --git a/src/migrate/azext_migrate/helpers/replication/job/_parse.py b/src/migrate/azext_migrate/helpers/replication/job/_parse.py
new file mode 100644
index 00000000000..8ca5f366c43
--- /dev/null
+++ b/src/migrate/azext_migrate/helpers/replication/job/_parse.py
@@ -0,0 +1,120 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+"""
+Job ID parsing utilities for Azure Migrate local replication jobs.
+"""
+
+from knack.util import CLIError
+
+
+def parse_job_id(job_id):
+    """
+    Parse a job ARM ID to extract vault name, resource group, and job name.
+
+    Args:
+        job_id (str): The job ARM ID
+
+    Returns:
+        tuple: (vault_name, resource_group_name, job_name)
+
+    Raises:
+        CLIError: If the job ID format is invalid
+    """
+    try:
+        job_id_parts = job_id.split("/")
+        if len(job_id_parts) < 11:
+            raise ValueError("Invalid job ID format")
+
+        resource_group_name = job_id_parts[4]
+        vault_name = job_id_parts[8]
+        job_name = job_id_parts[10]
+
+        return vault_name, resource_group_name, job_name
+
+    except (IndexError, ValueError) as e:
+        raise CLIError(
+            f"Invalid job ID format: {job_id}. "
+            "Expected format: /subscriptions/{subscription-id}/"
+            "resourceGroups/{resource-group}/providers/"
+            "Microsoft.DataReplication/replicationVaults/{vault-name}/"
+            f"jobs/{{job-name}}. Error: {str(e)}"
+        )
+
+
+def get_vault_name_from_project(cmd, resource_group_name,
+                                project_name, subscription_id):
+    """
+    Get the vault name from the Azure Migrate project solution.
+
+    Args:
+        cmd: The CLI command context
+        resource_group_name (str): Resource group name
+        project_name (str): Migrate project name
+        subscription_id (str): Subscription ID
+
+    Returns:
+        str: The vault name
+
+    Raises:
+        CLIError: If the solution or vault is not found
+    """
+    from knack.log import get_logger
+    from azext_migrate.helpers._utils import get_resource_by_id, APIVersion
+
+    logger = get_logger(__name__)
+
+    # Get the migration solution
+    solution_name = "Servers-Migration-ServerMigration_DataReplication"
+    solution_uri = (
+        f"/subscriptions/{subscription_id}/"
+        f"resourceGroups/{resource_group_name}/"
+        f"providers/Microsoft.Migrate/migrateProjects/{project_name}/"
+        f"solutions/{solution_name}"
+    )
+
+    logger.info(
+        "Retrieving solution '%s' from project '%s'",
+        solution_name, project_name)
+
+    try:
+        solution = get_resource_by_id(
+            cmd,
+            solution_uri,
+            APIVersion.Microsoft_Migrate.value
+        )
+
+        if not solution:
+            raise CLIError(
+                f"Solution '{solution_name}' not found in project "
+                f"'{project_name}'.")
+
+        # Extract vault ID from solution extended details
+        properties = solution.get('properties', {})
+        details = properties.get('details', {})
+        extended_details = details.get('extendedDetails', {})
+        vault_id = extended_details.get('vaultId')
+
+        if not vault_id:
+            raise CLIError(
+                "Vault ID not found in solution. The replication "
+                "infrastructure may not be initialized.")
+
+        # Parse vault name from vault ID
+        vault_id_parts = vault_id.split("/")
+        if len(vault_id_parts) < 9:
+            raise CLIError(f"Invalid vault ID format: {vault_id}")
+
+        vault_name = vault_id_parts[8]
+        return vault_name
+
+    except CLIError:
+        raise
+    except Exception as e:
+        logger.error(
+            "Error retrieving vault from project '%s': %s",
+            project_name, str(e))
+        raise CLIError(
+            f"Failed to retrieve vault information: {str(e)}")
diff --git a/src/migrate/azext_migrate/helpers/replication/job/_retrieve.py b/src/migrate/azext_migrate/helpers/replication/job/_retrieve.py
new file mode 100644
index 00000000000..a0f727b1fbb
--- /dev/null
+++ b/src/migrate/azext_migrate/helpers/replication/job/_retrieve.py
@@ -0,0 +1,160 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +""" +Job retrieval utilities for Azure Migrate local replication jobs. +""" + +from knack.util import CLIError +from knack.log import get_logger + +logger = get_logger(__name__) + + +def get_single_job(cmd, subscription_id, resource_group_name, + vault_name, job_name, format_job_output): + """ + Retrieve a single job by name. + + Args: + cmd: The CLI command context + subscription_id (str): Subscription ID + resource_group_name (str): Resource group name + vault_name (str): Vault name + job_name (str): Job name + format_job_output (callable): Function to format job output + + Returns: + dict: Formatted job details + + Raises: + CLIError: If the job is not found or cannot be retrieved + """ + from azext_migrate.helpers._utils import ( + get_resource_by_id, + APIVersion + ) + + job_uri = ( + f"/subscriptions/{subscription_id}/" + f"resourceGroups/{resource_group_name}/" + f"providers/Microsoft.DataReplication/" + f"replicationVaults/{vault_name}/" + f"jobs/{job_name}" + ) + + logger.info( + "Retrieving job '%s' from vault '%s'", + job_name, vault_name) + + try: + job_details = get_resource_by_id( + cmd, + job_uri, + APIVersion.Microsoft_DataReplication.value + ) + + if not job_details: + raise CLIError( + f"Job '{job_name}' not found in vault '{vault_name}'.") + + return format_job_output(job_details) + + except CLIError: + raise + except Exception as e: + logger.error( + "Error retrieving job '%s': %s", job_name, str(e)) + raise CLIError(f"Failed to retrieve job: {str(e)}") + + +def list_all_jobs(cmd, subscription_id, resource_group_name, + vault_name, format_job_summary): + """ + List all jobs in a vault with pagination support. + + Args: + cmd: The CLI command context + subscription_id (str): Subscription ID + resource_group_name (str): Resource group name + vault_name (str): Vault name + format_job_summary (callable): Function to format job summaries + + Returns: + list: List of formatted job summaries + + Raises: + CLIError: If jobs cannot be listed + """ + from azext_migrate.helpers._utils import ( + send_get_request, + APIVersion + ) + + if not vault_name: + raise CLIError( + "Unable to determine vault name. 
Please check your project " + "configuration.") + + jobs_uri = ( + f"/subscriptions/{subscription_id}/" + f"resourceGroups/{resource_group_name}/" + f"providers/Microsoft.DataReplication/" + f"replicationVaults/{vault_name}/" + f"jobs?api-version={APIVersion.Microsoft_DataReplication.value}" + ) + + request_uri = ( + f"{cmd.cli_ctx.cloud.endpoints.resource_manager}{jobs_uri}") + + logger.info( + "Listing jobs from vault '%s'", vault_name) + + try: + response = send_get_request(cmd, request_uri) + + if not response: + logger.warning("Empty response received when listing jobs") + return [] + + response_data = response.json() if hasattr(response, 'json') else {} + + if not response_data: + logger.warning("No data in response when listing jobs") + return [] + + jobs = response_data.get('value', []) + + if not jobs: + logger.info("No jobs found in vault '%s'", vault_name) + return [] + + # Handle pagination if nextLink is present + while response_data and response_data.get('nextLink'): + next_link = response_data['nextLink'] + response = send_get_request(cmd, next_link) + response_data = response.json() if ( + response and hasattr(response, 'json')) else {} + if response_data and response_data.get('value'): + jobs.extend(response_data['value']) + + logger.info( + "Retrieved %d jobs from vault '%s'", len(jobs), vault_name) + + # Format the jobs for cleaner output + formatted_jobs = [] + for job in jobs: + try: + formatted_jobs.append(format_job_summary(job)) + except Exception as format_error: + logger.warning("Error formatting job: %s", str(format_error)) + # Skip jobs that fail to format + continue + + return formatted_jobs + + except Exception as e: + logger.error("Error listing jobs: %s", str(e)) + raise CLIError(f"Failed to list jobs: {str(e)}") diff --git a/src/migrate/azext_migrate/helpers/replication/new/_execute_new.py b/src/migrate/azext_migrate/helpers/replication/new/_execute_new.py new file mode 100644 index 00000000000..f3e54b9598c --- /dev/null +++ b/src/migrate/azext_migrate/helpers/replication/new/_execute_new.py @@ -0,0 +1,400 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- + +# pylint: disable=line-too-long +# pylint: disable=possibly-used-before-assignment +from azext_migrate.helpers._utils import ( + get_resource_by_id, + create_or_update_resource, + APIVersion, + ProvisioningState, + SiteTypes, + VMNicSelection +) +import re +from knack.util import CLIError +from knack.log import get_logger + +logger = get_logger(__name__) + + +def get_ARC_resource_bridge_info(target_fabric, migrate_project): + target_fabric_custom_props = ( + target_fabric.get('properties', {}).get('customProperties', {})) + target_cluster_id = ( + target_fabric_custom_props.get('cluster', {}) + .get('resourceName', '')) + + if not target_cluster_id: + target_cluster_id = (target_fabric_custom_props + .get('azStackHciClusterName', '')) + + if not target_cluster_id: + target_cluster_id = (target_fabric_custom_props + .get('clusterName', '')) + + # Extract custom location from target fabric + custom_location_id = (target_fabric_custom_props + .get('customLocationRegion', '')) + + if not custom_location_id: + custom_location_id = (target_fabric_custom_props + .get('customLocationId', '')) + + if not custom_location_id: + if target_cluster_id: + cluster_parts = target_cluster_id.split('/') + if len(cluster_parts) >= 5: + custom_location_region = ( + migrate_project.get('location', 'eastus')) + custom_location_id = ( + f"/subscriptions/{cluster_parts[2]}/" + f"resourceGroups/{cluster_parts[4]}/providers/" + f"Microsoft.ExtendedLocation/customLocations/" + f"{cluster_parts[-1]}-customLocation" + ) + else: + custom_location_region = ( + migrate_project.get('location', 'eastus')) + else: + custom_location_region = ( + migrate_project.get('location', 'eastus')) + else: + custom_location_region = migrate_project.get('location', 'eastus') + return custom_location_id, custom_location_region, target_cluster_id + + +def construct_disk_and_nic_mapping(is_power_user_mode, + disk_to_include, + nic_to_include, + machine_props, + site_type, + os_disk_id, + target_virtual_switch_id, + target_test_virtual_switch_id): + disks = [] + nics = [] + + if is_power_user_mode: + if not disk_to_include or len(disk_to_include) == 0: + raise CLIError( + "At least one disk must be included for replication.") + + # Validate that exactly one disk is marked as OS disk + os_disks = [d for d in disk_to_include if d.get('isOSDisk', False)] + if len(os_disks) != 1: + raise CLIError( + "Exactly one disk must be designated as the OS disk.") + + # Process disks + for disk in disk_to_include: + disk_obj = { + 'diskId': disk.get('diskId'), + 'diskSizeGb': disk.get('diskSizeGb'), + 'diskFileFormat': disk.get('diskFileFormat', 'VHDX'), + 'isDynamic': disk.get('isDynamic', True), + 'isOSDisk': disk.get('isOSDisk', False) + } + disks.append(disk_obj) + + # Process NICs + for nic in nic_to_include: + nic_obj = { + 'nicId': nic.get('nicId'), + 'targetNetworkId': nic.get('targetNetworkId'), + 'testNetworkId': nic.get('testNetworkId', + nic.get('targetNetworkId')), + 'selectionTypeForFailover': nic.get( + 'selectionTypeForFailover', + VMNicSelection.SelectedByUser.value) + } + nics.append(nic_obj) + else: + machine_disks = machine_props.get('disks', []) + machine_nics = machine_props.get('networkAdapters', []) + + # Find OS disk and validate + os_disk_found = False + for disk in machine_disks: + if site_type == SiteTypes.HyperVSites.value: + disk_id = disk.get('instanceId') + disk_size = disk.get('maxSizeInBytes', 0) + else: # VMware + disk_id = 
disk.get('uuid') + disk_size = disk.get('maxSizeInBytes', 0) + + is_os_disk = disk_id == os_disk_id + if is_os_disk: + os_disk_found = True + # Round up to GB + disk_size_gb = (disk_size + (1024 ** 3 - 1)) // (1024 ** 3) + disk_obj = { + 'diskId': disk_id, + 'diskSizeGb': disk_size_gb, + 'diskFileFormat': 'VHDX', + 'isDynamic': True, + 'isOSDisk': is_os_disk + } + disks.append(disk_obj) + + # Validate that the specified OS disk was found + if not os_disk_found: + available_disks = [d['diskId'] for d in disks] + raise CLIError( + f"The specified OS disk ID '{os_disk_id}' was not found in the machine's disks. " + f"Available disk IDs: {', '.join(available_disks)}" + ) + + for nic in machine_nics: + nic_id = nic.get('nicId') + test_network_id = (target_test_virtual_switch_id or + target_virtual_switch_id) + + nic_obj = { + 'nicId': nic_id, + 'targetNetworkId': target_virtual_switch_id, + 'testNetworkId': test_network_id, + 'selectionTypeForFailover': VMNicSelection.SelectedByUser.value + } + nics.append(nic_obj) + return disks, nics + + +def _handle_configuration_validation(cmd, + subscription_id, + resource_group_name, + replication_vault_name, + machine_name, + machine_props, + target_vm_cpu_core, + target_vm_ram, + site_type): + protected_item_name = machine_name + protected_item_uri = ( + f"/subscriptions/{subscription_id}/resourceGroups" + f"/{resource_group_name}/providers/Microsoft.DataReplication" + f"/replicationVaults/{replication_vault_name}" + f"/protectedItems/{protected_item_name}" + ) + + try: + existing_item = get_resource_by_id( + cmd, + protected_item_uri, + APIVersion.Microsoft_DataReplication.value) + if existing_item: + protection_state = existing_item.get('properties', {}).get('protectionState') + logger.warning(f"Found existing protected item: {existing_item.get('id', 'unknown')}, state: {protection_state}") + + # If in failed state, offer helpful guidance + if protection_state in ['EnablingFailed', 'DisablingFailed', 'Failed']: + raise CLIError( + f"A failed replication exists for machine '{machine_name}' (state: {protection_state}). " + f"Please delete it first using Azure Portal or contact Azure Support. " + f"Protected item ID: {protected_item_uri}" + ) + else: + raise CLIError( + f"A replication already exists for machine '{machine_name}' (state: {protection_state}). 
" + "Remove it first before creating a new one.") + except (CLIError, ValueError, KeyError, TypeError) as e: + # Check if it's a 404 Not Found error - that's expected and fine + error_str = str(e) + logger.info(f"Exception during protected item check: {error_str}") + if ("ResourceNotFound" in error_str or "404" in error_str or + "Not Found" in error_str): + existing_item = None + else: + # Some other error occurred, re-raise it + raise + + # Determine Hyper-V generation + if site_type == SiteTypes.HyperVSites.value: + hyperv_generation = machine_props.get('generation', '1') + is_source_dynamic_memory = machine_props.get( + 'isDynamicMemoryEnabled', False) + else: # VMware + firmware = machine_props.get('firmware', 'BIOS') + hyperv_generation = '2' if firmware != 'BIOS' else '1' + is_source_dynamic_memory = False + + # Determine target CPU and RAM + source_cpu_cores = machine_props.get('numberOfProcessorCore', 2) + source_memory_mb = machine_props.get('allocatedMemoryInMB', 4096) + + if not target_vm_cpu_core: + target_vm_cpu_core = source_cpu_cores + + if not target_vm_ram: + target_vm_ram = max(source_memory_mb, 512) # Minimum 512MB + + if target_vm_cpu_core < 1 or target_vm_cpu_core > 240: + raise CLIError("Target VM CPU cores must be between 1 and 240.") + + if hyperv_generation == '1': + if target_vm_ram < 512 or target_vm_ram > 1048576: # 1TB + raise CLIError( + "Target VM RAM must be between 512 MB and 1048576 MB " + "(1 TB) for Generation 1 VMs.") + else: + if target_vm_ram < 32 or target_vm_ram > 12582912: # 12TB + raise CLIError( + "Target VM RAM must be between 32 MB and 12582912 MB " + "(12 TB) for Generation 2 VMs.") + + return (hyperv_generation, source_cpu_cores, is_source_dynamic_memory, + source_memory_mb, protected_item_uri, target_vm_cpu_core, + target_vm_ram) + + +def _build_custom_properties(instance_type, custom_location_id, + custom_location_region, + machine_id, disks, nics, target_vm_name, + target_resource_group_id, + target_storage_path_id, hyperv_generation, + target_vm_cpu_core, + source_cpu_cores, is_dynamic_ram_enabled, + is_source_dynamic_memory, + source_memory_mb, target_vm_ram, source_dra, + target_dra, + run_as_account_id, target_cluster_id): + """Build custom properties for protected item creation.""" + return { + "instanceType": instance_type, + "targetArcClusterCustomLocationId": custom_location_id or "", + "customLocationRegion": custom_location_region, + "fabricDiscoveryMachineId": machine_id, + "disksToInclude": [ + { + "diskId": disk["diskId"], + "diskSizeGB": disk["diskSizeGb"], + "diskFileFormat": disk["diskFileFormat"], + "isOsDisk": disk["isOSDisk"], + "isDynamic": disk["isDynamic"], + "diskPhysicalSectorSize": 512 + } + for disk in disks + ], + "targetVmName": target_vm_name, + "targetResourceGroupId": target_resource_group_id, + "storageContainerId": target_storage_path_id, + "hyperVGeneration": hyperv_generation, + "targetCpuCores": target_vm_cpu_core, + "sourceCpuCores": source_cpu_cores, + "isDynamicRam": (is_dynamic_ram_enabled + if is_dynamic_ram_enabled is not None + else is_source_dynamic_memory), + "sourceMemoryInMegaBytes": float(source_memory_mb), + "targetMemoryInMegaBytes": int(target_vm_ram), + "nicsToInclude": [ + { + "nicId": nic["nicId"], + "selectionTypeForFailover": nic["selectionTypeForFailover"], + "targetNetworkId": nic["targetNetworkId"], + "testNetworkId": nic.get("testNetworkId", "") + } + for nic in nics + ], + "dynamicMemoryConfig": { + "maximumMemoryInMegaBytes": 1048576, # Max for Gen 1 + 
"minimumMemoryInMegaBytes": 512, # Min for Gen 1 + "targetMemoryBufferPercentage": 20 + }, + "sourceFabricAgentName": source_dra.get('name'), + "targetFabricAgentName": target_dra.get('name'), + "runAsAccountId": run_as_account_id, + "targetHCIClusterId": target_cluster_id + } + + +# pylint: disable=too-many-locals +def create_protected_item(cmd, + subscription_id, + resource_group_name, + replication_vault_name, + machine_name, + machine_props, + target_vm_cpu_core, + target_vm_ram, + custom_location_id, + custom_location_region, + site_type, + instance_type, + disks, + nics, + target_vm_name, + target_resource_group_id, + target_storage_path_id, + is_dynamic_ram_enabled, + source_dra, + target_dra, + policy_name, + replication_extension_name, + machine_id, + run_as_account_id, + target_cluster_id): + + config_result = _handle_configuration_validation( + cmd, + subscription_id, + resource_group_name, + replication_vault_name, + machine_name, + machine_props, + target_vm_cpu_core, + target_vm_ram, + site_type + ) + (hyperv_generation, source_cpu_cores, is_source_dynamic_memory, + source_memory_mb, protected_item_uri, target_vm_cpu_core, + target_vm_ram) = config_result + + # Construct protected item properties with only the essential properties + custom_properties = _build_custom_properties( + instance_type, custom_location_id, custom_location_region, + machine_id, disks, nics, target_vm_name, target_resource_group_id, + target_storage_path_id, hyperv_generation, target_vm_cpu_core, + source_cpu_cores, is_dynamic_ram_enabled, is_source_dynamic_memory, + source_memory_mb, target_vm_ram, source_dra, target_dra, + run_as_account_id, target_cluster_id + ) + + protected_item_body = { + "properties": { + "policyName": policy_name, + "replicationExtensionName": replication_extension_name, + "customProperties": custom_properties + } + } + + response = create_or_update_resource( + cmd, + protected_item_uri, + APIVersion.Microsoft_DataReplication.value, + protected_item_body) + + # Extract job ID from response if available + job_id = None + if response and 'properties' in response: + props = response['properties'] + if 'lastSuccessfulEnableProtectionJob' in props: + job_info = props['lastSuccessfulEnableProtectionJob'] + if 'id' in job_info: + # Extract just the job name from the full ARM ID + job_id = job_info['id'].split('/')[-1] + elif 'lastEnableProtectionJob' in props: + job_info = props['lastEnableProtectionJob'] + if 'id' in job_info: + job_id = job_info['id'].split('/')[-1] + + print("Successfully initiated replication for machine '{}'.".format(machine_name)) + if job_id: + print("Job ID: {}".format(job_id)) + print("\nTo check job status, run:") + print(" az migrate local replication get-job --job-name {} " + "--resource-group {} " + "--project-name ".format(job_id, resource_group_name)) + + return response diff --git a/src/migrate/azext_migrate/_new_local_server_replication_helpers.py b/src/migrate/azext_migrate/helpers/replication/new/_process_inputs.py similarity index 51% rename from src/migrate/azext_migrate/_new_local_server_replication_helpers.py rename to src/migrate/azext_migrate/helpers/replication/new/_process_inputs.py index eb9703e173d..b1fe0deedc8 100644 --- a/src/migrate/azext_migrate/_new_local_server_replication_helpers.py +++ b/src/migrate/azext_migrate/helpers/replication/new/_process_inputs.py @@ -6,20 +6,14 @@ # pylint: disable=line-too-long # pylint: disable=possibly-used-before-assignment from azure.cli.core.commands.client_factory import get_subscription_id -from 
azext_migrate._helpers import ( +from azext_migrate.helpers._utils import ( send_get_request, get_resource_by_id, - create_or_update_resource, APIVersion, ProvisioningState, AzLocalInstanceTypes, - FabricInstanceTypes, - SiteTypes, - VMNicSelection, - validate_arm_id_format, - IdFormats + FabricInstanceTypes ) -import re import json from knack.util import CLIError from knack.log import get_logger @@ -27,317 +21,6 @@ logger = get_logger(__name__) -def _process_v2_dict(extended_details, app_map): - try: - app_map_v2 = json.loads( - extended_details['applianceNameToSiteIdMapV2']) - if isinstance(app_map_v2, list): - for item in app_map_v2: - if (isinstance(item, dict) and - 'ApplianceName' in item and - 'SiteId' in item): - # Store both lowercase and original case - app_map[item['ApplianceName'].lower()] = item['SiteId'] - app_map[item['ApplianceName']] = item['SiteId'] - except (json.JSONDecodeError, KeyError, TypeError): - pass - return app_map - - -def _process_v3_dict_map(app_map_v3, app_map): - for appliance_name_key, site_info in app_map_v3.items(): - if isinstance(site_info, dict) and 'SiteId' in site_info: - app_map[appliance_name_key.lower()] = site_info['SiteId'] - app_map[appliance_name_key] = site_info['SiteId'] - elif isinstance(site_info, str): - app_map[appliance_name_key.lower()] = site_info - app_map[appliance_name_key] = site_info - return app_map - - -def _process_v3_dict_list(app_map_v3, app_map): - # V3 might also be in list format - for item in app_map_v3: - if isinstance(item, dict): - # Check if it has ApplianceName/SiteId structure - if 'ApplianceName' in item and 'SiteId' in item: - app_map[item['ApplianceName'].lower()] = item['SiteId'] - app_map[item['ApplianceName']] = item['SiteId'] - else: - # Or it might be a single key-value pair - for key, value in item.items(): - if isinstance(value, dict) and 'SiteId' in value: - app_map[key.lower()] = value['SiteId'] - app_map[key] = value['SiteId'] - elif isinstance(value, str): - app_map[key.lower()] = value - app_map[key] = value - return app_map - - -def _process_v3_dict(extended_details, app_map): - try: - app_map_v3 = json.loads(extended_details['applianceNameToSiteIdMapV3']) - if isinstance(app_map_v3, dict): - app_map = _process_v3_dict_map(app_map_v3, app_map) - elif isinstance(app_map_v3, list): - app_map = _process_v3_dict_list(app_map_v3, app_map) - except (json.JSONDecodeError, KeyError, TypeError): - pass - return app_map - - -def validate_server_parameters( - cmd, - machine_id, - machine_index, - project_name, - resource_group_name, - source_appliance_name, - subscription_id): - # Validate that either machine_id or machine_index is provided - if not machine_id and not machine_index: - raise CLIError( - "Either machine_id or machine_index must be provided.") - if machine_id and machine_index: - raise CLIError( - "Only one of machine_id or machine_index should be " - "provided, not both.") - - if not subscription_id: - subscription_id = get_subscription_id(cmd.cli_ctx) - - if machine_index: - if not project_name: - raise CLIError( - "project_name is required when using machine_index.") - if not resource_group_name: - raise CLIError( - "resource_group_name is required when using " - "machine_index.") - - if not isinstance(machine_index, int) or machine_index < 1: - raise CLIError( - "machine_index must be a positive integer " - "(1-based index).") - - rg_uri = ( - f"/subscriptions/{subscription_id}/" - f"resourceGroups/{resource_group_name}") - discovery_solution_name = "Servers-Discovery-ServerDiscovery" - 
discovery_solution_uri = ( - f"{rg_uri}/providers/Microsoft.Migrate/migrateprojects" - f"/{project_name}/solutions/{discovery_solution_name}" - ) - discovery_solution = get_resource_by_id( - cmd, discovery_solution_uri, APIVersion.Microsoft_Migrate.value) - - if not discovery_solution: - raise CLIError( - f"Server Discovery Solution '{discovery_solution_name}' " - f"not in project '{project_name}'.") - - # Get appliance mapping to determine site type - app_map = {} - extended_details = ( - discovery_solution.get('properties', {}) - .get('details', {}) - .get('extendedDetails', {})) - - # Process applianceNameToSiteIdMapV2 and V3 - if 'applianceNameToSiteIdMapV2' in extended_details: - app_map = _process_v2_dict(extended_details, app_map) - - if 'applianceNameToSiteIdMapV3' in extended_details: - app_map = _process_v3_dict(extended_details, app_map) - - # Get source site ID - try both original and lowercase - source_site_id = ( - app_map.get(source_appliance_name) or - app_map.get(source_appliance_name.lower())) - if not source_site_id: - raise CLIError( - f"Source appliance '{source_appliance_name}' " - f"not in discovery solution.") - - # Determine site type from source site ID - hyperv_site_pattern = "/Microsoft.OffAzure/HyperVSites/" - vmware_site_pattern = "/Microsoft.OffAzure/VMwareSites/" - - if hyperv_site_pattern in source_site_id: - site_name = source_site_id.split('/')[-1] - machines_uri = ( - f"{rg_uri}/providers/Microsoft.OffAzure/" - f"HyperVSites/{site_name}/machines") - elif vmware_site_pattern in source_site_id: - site_name = source_site_id.split('/')[-1] - machines_uri = ( - f"{rg_uri}/providers/Microsoft.OffAzure/" - f"VMwareSites/{site_name}/machines") - else: - raise CLIError( - f"Unable to determine site type for source appliance " - f"'{source_appliance_name}'.") - - # Get all machines from the site - request_uri = ( - f"{cmd.cli_ctx.cloud.endpoints.resource_manager}" - f"{machines_uri}?api-version={APIVersion.Microsoft_OffAzure.value}" - ) - - response = send_get_request(cmd, request_uri) - machines_data = response.json() - machines = machines_data.get('value', []) - - # Fetch all pages if there are more - while machines_data.get('nextLink'): - response = send_get_request(cmd, machines_data.get('nextLink')) - machines_data = response.json() - machines.extend(machines_data.get('value', [])) - - # Check if the index is valid - if machine_index > len(machines): - raise CLIError( - f"Invalid machine_index {machine_index}. 
" - f"Only {len(machines)} machines found in site '{site_name}'.") - - # Get the machine at the specified index (convert 1-based to 0-based) - selected_machine = machines[machine_index - 1] - machine_id = selected_machine.get('id') - return rg_uri - - -def validate_required_parameters(machine_id, - target_storage_path_id, - target_resource_group_id, - target_vm_name, - source_appliance_name, - target_appliance_name, - disk_to_include, - nic_to_include, - target_virtual_switch_id, - os_disk_id, - is_dynamic_memory_enabled): - # Validate required parameters - if not machine_id: - raise CLIError("machine_id could not be determined.") - if not target_storage_path_id: - raise CLIError("target_storage_path_id is required.") - if not target_resource_group_id: - raise CLIError("target_resource_group_id is required.") - if not target_vm_name: - raise CLIError("target_vm_name is required.") - if not source_appliance_name: - raise CLIError("source_appliance_name is required.") - if not target_appliance_name: - raise CLIError("target_appliance_name is required.") - - # Validate parameter set requirements - is_power_user_mode = (disk_to_include is not None or - nic_to_include is not None) - is_default_user_mode = (target_virtual_switch_id is not None or - os_disk_id is not None) - - if is_power_user_mode and is_default_user_mode: - raise CLIError( - "Cannot mix default user mode parameters " - "(target_virtual_switch_id, os_disk_id) with power user mode " - "parameters (disk_to_include, nic_to_include).") - - if is_power_user_mode: - # Power user mode validation - if not disk_to_include: - raise CLIError( - "disk_to_include is required when using power user mode.") - if not nic_to_include: - raise CLIError( - "nic_to_include is required when using power user mode.") - else: - # Default user mode validation - if not target_virtual_switch_id: - raise CLIError( - "target_virtual_switch_id is required when using " - "default user mode.") - if not os_disk_id: - raise CLIError( - "os_disk_id is required when using default user mode.") - - is_dynamic_ram_enabled = None - if is_dynamic_memory_enabled: - if is_dynamic_memory_enabled not in ['true', 'false']: - raise CLIError( - "is_dynamic_memory_enabled must be either " - "'true' or 'false'.") - is_dynamic_ram_enabled = is_dynamic_memory_enabled == 'true' - return is_dynamic_ram_enabled, is_power_user_mode - - -def validate_ARM_id_formats(machine_id, - target_storage_path_id, - target_resource_group_id, - target_virtual_switch_id, - target_test_virtual_switch_id): - # Validate ARM ID formats - if not validate_arm_id_format( - machine_id, - IdFormats.MachineArmIdTemplate): - raise CLIError( - f"Invalid -machine_id '{machine_id}'. " - f"A valid machine ARM ID should follow the format " - f"'{IdFormats.MachineArmIdTemplate}'.") - - if not validate_arm_id_format( - target_storage_path_id, - IdFormats.StoragePathArmIdTemplate): - raise CLIError( - f"Invalid -target_storage_path_id " - f"'{target_storage_path_id}'. " - f"A valid storage path ARM ID should follow the format " - f"'{IdFormats.StoragePathArmIdTemplate}'.") - - if not validate_arm_id_format( - target_resource_group_id, - IdFormats.ResourceGroupArmIdTemplate): - raise CLIError( - f"Invalid -target_resource_group_id " - f"'{target_resource_group_id}'. 
" - f"A valid resource group ARM ID should follow the format " - f"'{IdFormats.ResourceGroupArmIdTemplate}'.") - - if (target_virtual_switch_id and - not validate_arm_id_format( - target_virtual_switch_id, - IdFormats.LogicalNetworkArmIdTemplate)): - raise CLIError( - f"Invalid -target_virtual_switch_id " - f"'{target_virtual_switch_id}'. " - f"A valid logical network ARM ID should follow the format " - f"'{IdFormats.LogicalNetworkArmIdTemplate}'.") - - if (target_test_virtual_switch_id and - not validate_arm_id_format( - target_test_virtual_switch_id, - IdFormats.LogicalNetworkArmIdTemplate)): - raise CLIError( - f"Invalid -target_test_virtual_switch_id " - f"'{target_test_virtual_switch_id}'. " - f"A valid logical network ARM ID should follow the format " - f"'{IdFormats.LogicalNetworkArmIdTemplate}'.") - - machine_id_parts = machine_id.split("/") - if len(machine_id_parts) < 11: - raise CLIError(f"Invalid machine ARM ID format: '{machine_id}'") - - resource_group_name = machine_id_parts[4] - site_type = machine_id_parts[7] - site_name = machine_id_parts[8] - machine_name = machine_id_parts[10] - - run_as_account_id = None - instance_type = None - return site_type, site_name, machine_name, run_as_account_id, instance_type, resource_group_name - - def process_site_type_hyperV(cmd, rg_uri, site_name, @@ -1112,388 +795,3 @@ def process_target_fabric(cmd, f"disconnected state.") return target_fabric, source_dra, target_dra - - -def validate_replication_extension(cmd, - rg_uri, - source_fabric, - target_fabric, - replication_vault_name): - source_fabric_id = source_fabric['id'] - target_fabric_id = target_fabric['id'] - source_fabric_short_name = source_fabric_id.split('/')[-1] - target_fabric_short_name = target_fabric_id.split('/')[-1] - replication_extension_name = ( - f"{source_fabric_short_name}-{target_fabric_short_name}-" - f"MigReplicationExtn") - extension_uri = ( - f"{rg_uri}/providers/Microsoft.DataReplication" - f"/replicationVaults/{replication_vault_name}" - f"/replicationExtensions/{replication_extension_name}" - ) - replication_extension = get_resource_by_id( - cmd, extension_uri, APIVersion.Microsoft_DataReplication.value) - - if not replication_extension: - raise CLIError( - f"The replication extension '{replication_extension_name}' " - f"not found. Run 'az migrate local replication init' first.") - - extension_state = (replication_extension.get('properties', {}) - .get('provisioningState')) - - if extension_state != ProvisioningState.Succeeded.value: - raise CLIError( - f"The replication extension '{replication_extension_name}' " - f"is not ready. 
State: '{extension_state}'") - return replication_extension_name - - -def get_ARC_resource_bridge_info(target_fabric, migrate_project): - target_fabric_custom_props = ( - target_fabric.get('properties', {}).get('customProperties', {})) - target_cluster_id = ( - target_fabric_custom_props.get('cluster', {}) - .get('resourceName', '')) - - if not target_cluster_id: - target_cluster_id = (target_fabric_custom_props - .get('azStackHciClusterName', '')) - - if not target_cluster_id: - target_cluster_id = (target_fabric_custom_props - .get('clusterName', '')) - - # Extract custom location from target fabric - custom_location_id = (target_fabric_custom_props - .get('customLocationRegion', '')) - - if not custom_location_id: - custom_location_id = (target_fabric_custom_props - .get('customLocationId', '')) - - if not custom_location_id: - if target_cluster_id: - cluster_parts = target_cluster_id.split('/') - if len(cluster_parts) >= 5: - custom_location_region = ( - migrate_project.get('location', 'eastus')) - custom_location_id = ( - f"/subscriptions/{cluster_parts[2]}/" - f"resourceGroups/{cluster_parts[4]}/providers/" - f"Microsoft.ExtendedLocation/customLocations/" - f"{cluster_parts[-1]}-customLocation" - ) - else: - custom_location_region = ( - migrate_project.get('location', 'eastus')) - else: - custom_location_region = ( - migrate_project.get('location', 'eastus')) - else: - custom_location_region = migrate_project.get('location', 'eastus') - return custom_location_id, custom_location_region, target_cluster_id - - -def validate_target_VM_name(target_vm_name): - if len(target_vm_name) == 0 or len(target_vm_name) > 64: - raise CLIError( - "The target virtual machine name must be between 1 and 64 " - "characters long.") - - vm_name_pattern = r"^[^_\W][a-zA-Z0-9\-]{0,63}(? 
240: - raise CLIError("Target VM CPU cores must be between 1 and 240.") - - if hyperv_generation == '1': - if target_vm_ram < 512 or target_vm_ram > 1048576: # 1TB - raise CLIError( - "Target VM RAM must be between 512 MB and 1048576 MB " - "(1 TB) for Generation 1 VMs.") - else: - if target_vm_ram < 32 or target_vm_ram > 12582912: # 12TB - raise CLIError( - "Target VM RAM must be between 32 MB and 12582912 MB " - "(12 TB) for Generation 2 VMs.") - - return (hyperv_generation, source_cpu_cores, is_source_dynamic_memory, - source_memory_mb, protected_item_uri) - - -def _build_custom_properties(instance_type, custom_location_id, - custom_location_region, - machine_id, disks, nics, target_vm_name, - target_resource_group_id, - target_storage_path_id, hyperv_generation, - target_vm_cpu_core, - source_cpu_cores, is_dynamic_ram_enabled, - is_source_dynamic_memory, - source_memory_mb, target_vm_ram, source_dra, - target_dra, - run_as_account_id, target_cluster_id): - """Build custom properties for protected item creation.""" - return { - "instanceType": instance_type, - "targetArcClusterCustomLocationId": custom_location_id or "", - "customLocationRegion": custom_location_region, - "fabricDiscoveryMachineId": machine_id, - "disksToInclude": [ - { - "diskId": disk["diskId"], - "diskSizeGB": disk["diskSizeGb"], - "diskFileFormat": disk["diskFileFormat"], - "isOsDisk": disk["isOSDisk"], - "isDynamic": disk["isDynamic"], - "diskPhysicalSectorSize": 512 - } - for disk in disks - ], - "targetVmName": target_vm_name, - "targetResourceGroupId": target_resource_group_id, - "storageContainerId": target_storage_path_id, - "hyperVGeneration": hyperv_generation, - "targetCpuCores": target_vm_cpu_core, - "sourceCpuCores": source_cpu_cores, - "isDynamicRam": (is_dynamic_ram_enabled - if is_dynamic_ram_enabled is not None - else is_source_dynamic_memory), - "sourceMemoryInMegaBytes": float(source_memory_mb), - "targetMemoryInMegaBytes": int(target_vm_ram), - "nicsToInclude": [ - { - "nicId": nic["nicId"], - "selectionTypeForFailover": nic["selectionTypeForFailover"], - "targetNetworkId": nic["targetNetworkId"], - "testNetworkId": nic.get("testNetworkId", "") - } - for nic in nics - ], - "dynamicMemoryConfig": { - "maximumMemoryInMegaBytes": 1048576, # Max for Gen 1 - "minimumMemoryInMegaBytes": 512, # Min for Gen 1 - "targetMemoryBufferPercentage": 20 - }, - "sourceFabricAgentName": source_dra.get('name'), - "targetFabricAgentName": target_dra.get('name'), - "runAsAccountId": run_as_account_id, - "targetHCIClusterId": target_cluster_id - } - - -# pylint: disable=too-many-locals -def create_protected_item(cmd, - subscription_id, - resource_group_name, - replication_vault_name, - machine_name, - machine_props, - target_vm_cpu_core, - target_vm_ram, - custom_location_id, - custom_location_region, - site_type, - instance_type, - disks, - nics, - target_vm_name, - target_resource_group_id, - target_storage_path_id, - is_dynamic_ram_enabled, - source_dra, - target_dra, - policy_name, - replication_extension_name, - machine_id, - run_as_account_id, - target_cluster_id): - - config_result = _handle_configuration_validation( - cmd, - subscription_id, - resource_group_name, - replication_vault_name, - machine_name, - machine_props, - target_vm_cpu_core, - target_vm_ram, - site_type - ) - (hyperv_generation, source_cpu_cores, is_source_dynamic_memory, - source_memory_mb, protected_item_uri) = config_result - - # Construct protected item properties with only the essential properties - custom_properties = 
_build_custom_properties( - instance_type, custom_location_id, custom_location_region, - machine_id, disks, nics, target_vm_name, target_resource_group_id, - target_storage_path_id, hyperv_generation, target_vm_cpu_core, - source_cpu_cores, is_dynamic_ram_enabled, is_source_dynamic_memory, - source_memory_mb, target_vm_ram, source_dra, target_dra, - run_as_account_id, target_cluster_id - ) - - protected_item_body = { - "properties": { - "policyName": policy_name, - "replicationExtensionName": replication_extension_name, - "customProperties": custom_properties - } - } - - create_or_update_resource( - cmd, - protected_item_uri, - APIVersion.Microsoft_DataReplication.value, - protected_item_body) - - print(f"Successfully initiated replication for machine " - f"'{machine_name}'.") diff --git a/src/migrate/azext_migrate/helpers/replication/new/_validate.py b/src/migrate/azext_migrate/helpers/replication/new/_validate.py new file mode 100644 index 00000000000..3379436c4a4 --- /dev/null +++ b/src/migrate/azext_migrate/helpers/replication/new/_validate.py @@ -0,0 +1,441 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +# pylint: disable=line-too-long +# pylint: disable=possibly-used-before-assignment +from azure.cli.core.commands.client_factory import get_subscription_id +from azext_migrate.helpers._utils import ( + send_get_request, + get_resource_by_id, + APIVersion, + ProvisioningState, + validate_arm_id_format, + IdFormats +) +import json +from knack.util import CLIError +from knack.log import get_logger +import re + +logger = get_logger(__name__) + + +def _process_v2_dict(extended_details, app_map): + try: + app_map_v2 = json.loads( + extended_details['applianceNameToSiteIdMapV2']) + if isinstance(app_map_v2, list): + for item in app_map_v2: + if (isinstance(item, dict) and + 'ApplianceName' in item and + 'SiteId' in item): + # Store both lowercase and original case + app_map[item['ApplianceName'].lower()] = item['SiteId'] + app_map[item['ApplianceName']] = item['SiteId'] + except (json.JSONDecodeError, KeyError, TypeError): + pass + return app_map + + +def _process_v3_dict_map(app_map_v3, app_map): + for appliance_name_key, site_info in app_map_v3.items(): + if isinstance(site_info, dict) and 'SiteId' in site_info: + app_map[appliance_name_key.lower()] = site_info['SiteId'] + app_map[appliance_name_key] = site_info['SiteId'] + elif isinstance(site_info, str): + app_map[appliance_name_key.lower()] = site_info + app_map[appliance_name_key] = site_info + return app_map + + +def _process_v3_dict_list(app_map_v3, app_map): + # V3 might also be in list format + for item in app_map_v3: + if isinstance(item, dict): + # Check if it has ApplianceName/SiteId structure + if 'ApplianceName' in item and 'SiteId' in item: + app_map[item['ApplianceName'].lower()] = item['SiteId'] + app_map[item['ApplianceName']] = item['SiteId'] + else: + # Or it might be a single key-value pair + for key, value in item.items(): + if isinstance(value, dict) and 'SiteId' in value: + app_map[key.lower()] = value['SiteId'] + app_map[key] = value['SiteId'] + elif isinstance(value, str): + app_map[key.lower()] = value + app_map[key] = value + return app_map + + +def _process_v3_dict(extended_details, app_map): + try: + app_map_v3 
= json.loads(extended_details['applianceNameToSiteIdMapV3']) + if isinstance(app_map_v3, dict): + app_map = _process_v3_dict_map(app_map_v3, app_map) + elif isinstance(app_map_v3, list): + app_map = _process_v3_dict_list(app_map_v3, app_map) + except (json.JSONDecodeError, KeyError, TypeError): + pass + return app_map + + +def validate_server_parameters( + cmd, + machine_id, + machine_index, + project_name, + resource_group_name, + source_appliance_name, + subscription_id): + # Validate that either machine_id or machine_index is provided + if not machine_id and not machine_index: + raise CLIError( + "Either machine_id or machine_index must be provided.") + if machine_id and machine_index: + raise CLIError( + "Only one of machine_id or machine_index should be " + "provided, not both.") + + if not subscription_id: + subscription_id = get_subscription_id(cmd.cli_ctx) + + # Initialize rg_uri - will be set based on machine_id or resource_group_name + rg_uri = None + + if machine_index: + if not project_name: + raise CLIError( + "project_name is required when using machine_index.") + if not resource_group_name: + raise CLIError( + "resource_group_name is required when using " + "machine_index.") + + if not isinstance(machine_index, int) or machine_index < 1: + raise CLIError( + "machine_index must be a positive integer " + "(1-based index).") + + rg_uri = ( + f"/subscriptions/{subscription_id}/" + f"resourceGroups/{resource_group_name}") + discovery_solution_name = "Servers-Discovery-ServerDiscovery" + discovery_solution_uri = ( + f"{rg_uri}/providers/Microsoft.Migrate/migrateprojects" + f"/{project_name}/solutions/{discovery_solution_name}" + ) + discovery_solution = get_resource_by_id( + cmd, discovery_solution_uri, APIVersion.Microsoft_Migrate.value) + + if not discovery_solution: + raise CLIError( + f"Server Discovery Solution '{discovery_solution_name}' " + f"not in project '{project_name}'.") + + # Get appliance mapping to determine site type + app_map = {} + extended_details = ( + discovery_solution.get('properties', {}) + .get('details', {}) + .get('extendedDetails', {})) + + # Process applianceNameToSiteIdMapV2 and V3 + if 'applianceNameToSiteIdMapV2' in extended_details: + app_map = _process_v2_dict(extended_details, app_map) + + if 'applianceNameToSiteIdMapV3' in extended_details: + app_map = _process_v3_dict(extended_details, app_map) + + # Get source site ID - try both original and lowercase + source_site_id = ( + app_map.get(source_appliance_name) or + app_map.get(source_appliance_name.lower())) + if not source_site_id: + raise CLIError( + f"Source appliance '{source_appliance_name}' " + f"not in discovery solution.") + + # Determine site type from source site ID + hyperv_site_pattern = "/Microsoft.OffAzure/HyperVSites/" + vmware_site_pattern = "/Microsoft.OffAzure/VMwareSites/" + + if hyperv_site_pattern in source_site_id: + site_name = source_site_id.split('/')[-1] + machines_uri = ( + f"{rg_uri}/providers/Microsoft.OffAzure/" + f"HyperVSites/{site_name}/machines") + elif vmware_site_pattern in source_site_id: + site_name = source_site_id.split('/')[-1] + machines_uri = ( + f"{rg_uri}/providers/Microsoft.OffAzure/" + f"VMwareSites/{site_name}/machines") + else: + raise CLIError( + f"Unable to determine site type for source appliance " + f"'{source_appliance_name}'.") + + # Get all machines from the site + request_uri = ( + f"{cmd.cli_ctx.cloud.endpoints.resource_manager}" + f"{machines_uri}?api-version={APIVersion.Microsoft_OffAzure.value}" + ) + + response = 
send_get_request(cmd, request_uri) + machines_data = response.json() + machines = machines_data.get('value', []) + + # Fetch all pages if there are more + while machines_data.get('nextLink'): + response = send_get_request(cmd, machines_data.get('nextLink')) + machines_data = response.json() + machines.extend(machines_data.get('value', [])) + + # Check if the index is valid + if machine_index > len(machines): + raise CLIError( + f"Invalid machine_index {machine_index}. " + f"Only {len(machines)} machines found in site '{site_name}'.") + + # Get the machine at the specified index (convert 1-based to 0-based) + selected_machine = machines[machine_index - 1] + machine_id = selected_machine.get('id') + else: + # machine_id was provided directly + # Check if it's in Microsoft.Migrate format and needs to be resolved + if "/Microsoft.Migrate/MigrateProjects/" in machine_id or "/Microsoft.Migrate/migrateprojects/" in machine_id: + # This is a Migrate Project machine ID, need to resolve to OffAzure machine ID + migrate_machine = get_resource_by_id( + cmd, machine_id, APIVersion.Microsoft_Migrate.value) + + if not migrate_machine: + raise CLIError( + f"Machine not found with ID '{machine_id}'.") + + # Get the actual OffAzure machine ID from properties + machine_props = migrate_machine.get('properties', {}) + discovery_data = machine_props.get('discoveryData', []) + + # Find the OS discovery data entry which contains the actual machine reference + offazure_machine_id = None + for data in discovery_data: + if data.get('osType'): + # The extended data should contain the actual machine ARM ID + extended_data = data.get('extendedInfo', {}) + # Try different possible field names for the OffAzure machine ID + offazure_machine_id = ( + extended_data.get('sdsArmId') or + extended_data.get('machineArmId') or + extended_data.get('machineId') + ) + if offazure_machine_id: + break + + # If not found in discoveryData, check other properties + if not offazure_machine_id: + offazure_machine_id = machine_props.get('machineId') or machine_props.get('machineArmId') + + if not offazure_machine_id: + raise CLIError( + f"Could not resolve the OffAzure machine ID from Migrate machine '{machine_id}'. 
" + "Please provide the machine ID in the format " + "'/subscriptions/.../Microsoft.OffAzure/{{HyperVSites|VMwareSites}}/.../machines/...'") + + machine_id = offazure_machine_id + + # Extract resource_group_name from machine_id if not provided + if not resource_group_name: + machine_id_parts = machine_id.split("/") + if len(machine_id_parts) >= 5: + resource_group_name = machine_id_parts[4] + else: + raise CLIError(f"Invalid machine ARM ID format: '{machine_id}'") + + rg_uri = ( + f"/subscriptions/{subscription_id}/" + f"resourceGroups/{resource_group_name}") + + return rg_uri, machine_id + + +def validate_required_parameters(machine_id, + target_storage_path_id, + target_resource_group_id, + target_vm_name, + source_appliance_name, + target_appliance_name, + disk_to_include, + nic_to_include, + target_virtual_switch_id, + os_disk_id, + is_dynamic_memory_enabled): + # Validate required parameters + if not machine_id: + raise CLIError("machine_id could not be determined.") + if not target_storage_path_id: + raise CLIError("target_storage_path_id is required.") + if not target_resource_group_id: + raise CLIError("target_resource_group_id is required.") + if not target_vm_name: + raise CLIError("target_vm_name is required.") + if not source_appliance_name: + raise CLIError("source_appliance_name is required.") + if not target_appliance_name: + raise CLIError("target_appliance_name is required.") + + # Validate parameter set requirements + is_power_user_mode = (disk_to_include is not None or + nic_to_include is not None) + is_default_user_mode = (target_virtual_switch_id is not None or + os_disk_id is not None) + + if is_power_user_mode and is_default_user_mode: + raise CLIError( + "Cannot mix default user mode parameters " + "(target_virtual_switch_id, os_disk_id) with power user mode " + "parameters (disk_to_include, nic_to_include).") + + if is_power_user_mode: + # Power user mode validation + if not disk_to_include: + raise CLIError( + "disk_to_include is required when using power user mode.") + if not nic_to_include: + raise CLIError( + "nic_to_include is required when using power user mode.") + else: + # Default user mode validation + if not target_virtual_switch_id: + raise CLIError( + "target_virtual_switch_id is required when using " + "default user mode.") + if not os_disk_id: + raise CLIError( + "os_disk_id is required when using default user mode.") + + is_dynamic_ram_enabled = None + if is_dynamic_memory_enabled: + if is_dynamic_memory_enabled not in ['true', 'false']: + raise CLIError( + "is_dynamic_memory_enabled must be either " + "'true' or 'false'.") + is_dynamic_ram_enabled = is_dynamic_memory_enabled == 'true' + return is_dynamic_ram_enabled, is_power_user_mode + + +def validate_ARM_id_formats(machine_id, + target_storage_path_id, + target_resource_group_id, + target_virtual_switch_id, + target_test_virtual_switch_id): + # Validate ARM ID formats + if not validate_arm_id_format( + machine_id, + IdFormats.MachineArmIdTemplate): + raise CLIError( + f"Invalid -machine_id '{machine_id}'. " + f"A valid machine ARM ID should follow the format " + f"'{IdFormats.MachineArmIdTemplate}'.") + + if not validate_arm_id_format( + target_storage_path_id, + IdFormats.StoragePathArmIdTemplate): + raise CLIError( + f"Invalid -target_storage_path_id " + f"'{target_storage_path_id}'. 
" + f"A valid storage path ARM ID should follow the format " + f"'{IdFormats.StoragePathArmIdTemplate}'.") + + if not validate_arm_id_format( + target_resource_group_id, + IdFormats.ResourceGroupArmIdTemplate): + raise CLIError( + f"Invalid -target_resource_group_id " + f"'{target_resource_group_id}'. " + f"A valid resource group ARM ID should follow the format " + f"'{IdFormats.ResourceGroupArmIdTemplate}'.") + + if (target_virtual_switch_id and + not validate_arm_id_format( + target_virtual_switch_id, + IdFormats.LogicalNetworkArmIdTemplate)): + raise CLIError( + f"Invalid -target_virtual_switch_id " + f"'{target_virtual_switch_id}'. " + f"A valid logical network ARM ID should follow the format " + f"'{IdFormats.LogicalNetworkArmIdTemplate}'.") + + if (target_test_virtual_switch_id and + not validate_arm_id_format( + target_test_virtual_switch_id, + IdFormats.LogicalNetworkArmIdTemplate)): + raise CLIError( + f"Invalid -target_test_virtual_switch_id " + f"'{target_test_virtual_switch_id}'. " + f"A valid logical network ARM ID should follow the format " + f"'{IdFormats.LogicalNetworkArmIdTemplate}'.") + + machine_id_parts = machine_id.split("/") + if len(machine_id_parts) < 11: + raise CLIError(f"Invalid machine ARM ID format: '{machine_id}'") + + resource_group_name = machine_id_parts[4] + site_type = machine_id_parts[7] + site_name = machine_id_parts[8] + machine_name = machine_id_parts[10] + + run_as_account_id = None + instance_type = None + return site_type, site_name, machine_name, run_as_account_id, instance_type, resource_group_name + + +def validate_replication_extension(cmd, + rg_uri, + source_fabric, + target_fabric, + replication_vault_name): + source_fabric_id = source_fabric['id'] + target_fabric_id = target_fabric['id'] + source_fabric_short_name = source_fabric_id.split('/')[-1] + target_fabric_short_name = target_fabric_id.split('/')[-1] + replication_extension_name = ( + f"{source_fabric_short_name}-{target_fabric_short_name}-" + f"MigReplicationExtn") + extension_uri = ( + f"{rg_uri}/providers/Microsoft.DataReplication" + f"/replicationVaults/{replication_vault_name}" + f"/replicationExtensions/{replication_extension_name}" + ) + replication_extension = get_resource_by_id( + cmd, extension_uri, APIVersion.Microsoft_DataReplication.value) + + if not replication_extension: + raise CLIError( + f"The replication extension '{replication_extension_name}' " + f"not found. Run 'az migrate local replication init' first.") + + extension_state = (replication_extension.get('properties', {}) + .get('provisioningState')) + + if extension_state != ProvisioningState.Succeeded.value: + raise CLIError( + f"The replication extension '{replication_extension_name}' " + f"is not ready. State: '{extension_state}'") + return replication_extension_name + + +def validate_target_VM_name(target_vm_name): + if len(target_vm_name) == 0 or len(target_vm_name) > 64: + raise CLIError( + "The target virtual machine name must be between 1 and 64 " + "characters long.") + + vm_name_pattern = r"^[^_\W][a-zA-Z0-9\-]{0,63}(?= 400: + error_message = ( + f"Failed to remove replication. 
" + f"Status: {response.status_code}") + try: + error_body = response.json() + if 'error' in error_body: + error_details = error_body['error'] + error_code = error_details.get('code', 'Unknown') + error_msg = error_details.get( + 'message', 'No message provided') + raise CLIError(f"{error_code}: {error_msg}") + except (ValueError, KeyError): + error_message += f", Response: {response.text}" + raise CLIError(error_message) + + return response + + except CLIError: + raise + except Exception as e: + logger.error( + "Error removing replication for '%s': %s", + protected_item_name, str(e)) + raise CLIError( + f"Failed to remove replication: {str(e)}") + + +def get_job_details(cmd, subscription_id, resource_group_name, + vault_name, job_name): + """ + Retrieve job details after initiating removal. + + Args: + cmd: The CLI command context + subscription_id (str): Subscription ID + resource_group_name (str): Resource group name + vault_name (str): Vault name + job_name (str): Job name + + Returns: + dict or None: Job details if successful, None otherwise + """ + from azext_migrate.helpers._utils import ( + get_resource_by_id, + APIVersion + ) + + job_uri = ( + f"/subscriptions/{subscription_id}/" + f"resourceGroups/{resource_group_name}/" + f"providers/Microsoft.DataReplication/" + f"replicationVaults/{vault_name}/" + f"jobs/{job_name}" + ) + + try: + job_details = get_resource_by_id( + cmd, + job_uri, + APIVersion.Microsoft_DataReplication.value + ) + + return job_details + + except Exception as job_error: + logger.warning( + "Could not retrieve job details: %s. " + "Replication removal was initiated.", + str(job_error)) + return None + + +def execute_removal(cmd, subscription_id, target_object_id, + resource_group_name, vault_name, + protected_item_name, force_remove): + """ + Execute the replication removal workflow. 
+ + Args: + cmd: The CLI command context + subscription_id (str): Subscription ID + target_object_id (str): Protected item ARM ID + resource_group_name (str): Resource group name + vault_name (str): Vault name + protected_item_name (str): Protected item name + force_remove (bool): Whether to force delete + + Returns: + dict or None: Job details if available + """ + from azext_migrate.helpers.replication.remove._parse import ( + extract_job_name_from_operation + ) + from azext_migrate.helpers.replication.remove._output import ( + display_removal_success, + display_removal_initiated, + log_removal_success + ) + + logger.info( + "Attempting to remove replication for protected item '%s' " + "in vault '%s'", + protected_item_name, vault_name) + + # Send the DELETE request + response = send_delete_request( + cmd, target_object_id, force_remove, protected_item_name) + + # Extract the job name from the response headers + operation_location = response.headers.get( + 'Azure-AsyncOperation') or response.headers.get('Location') + + job_name = extract_job_name_from_operation(operation_location) + + if job_name: + # Try to get and return the job details + job_details = get_job_details( + cmd, subscription_id, resource_group_name, + vault_name, job_name) + + if job_details: + log_removal_success(protected_item_name, job_name) + display_removal_success( + protected_item_name, job_name, resource_group_name) + return job_details + else: + # Job details unavailable but we have the job name + display_removal_success( + protected_item_name, job_name, resource_group_name) + return None + else: + # No job name available + log_removal_success(protected_item_name) + display_removal_initiated(protected_item_name) + return None diff --git a/src/migrate/azext_migrate/helpers/replication/remove/_output.py b/src/migrate/azext_migrate/helpers/replication/remove/_output.py new file mode 100644 index 00000000000..34febea25d5 --- /dev/null +++ b/src/migrate/azext_migrate/helpers/replication/remove/_output.py @@ -0,0 +1,62 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +""" +Output formatting utilities for Azure Migrate replication removal. +""" + +from knack.log import get_logger + +logger = get_logger(__name__) + + +def display_removal_success(protected_item_name, job_name, + resource_group_name): + """ + Display success message with job tracking information. + + Args: + protected_item_name (str): Name of the protected item + job_name (str): Name of the removal job + resource_group_name (str): Resource group name + """ + print("Successfully initiated removal of replication for " + "'{}'.".format(protected_item_name)) + print("Job ID: {}".format(job_name)) + print("\nTo check removal job status, run:") + print(" az migrate local replication get-job " + "--job-name {} " + "--resource-group {} " + "--project-name ".format(job_name, resource_group_name)) + + +def display_removal_initiated(protected_item_name): + """ + Display simple success message when job details are unavailable. 
+ + Args: + protected_item_name (str): Name of the protected item + """ + print(f"Successfully initiated removal of replication for " + f"'{protected_item_name}'.") + + +def log_removal_success(protected_item_name, job_name=None): + """ + Log successful removal initiation. + + Args: + protected_item_name (str): Name of the protected item + job_name (str, optional): Name of the removal job + """ + if job_name: + logger.info( + "Successfully initiated removal of replication " + "for '%s'. Job: %s", + protected_item_name, job_name) + else: + logger.info( + "Successfully initiated removal of replication for '%s'", + protected_item_name) diff --git a/src/migrate/azext_migrate/helpers/replication/remove/_parse.py b/src/migrate/azext_migrate/helpers/replication/remove/_parse.py new file mode 100644 index 00000000000..6a94508076f --- /dev/null +++ b/src/migrate/azext_migrate/helpers/replication/remove/_parse.py @@ -0,0 +1,77 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +""" +Protected item ID parsing utilities for Azure Migrate replication removal. +""" + +from knack.util import CLIError + + +def parse_protected_item_id(target_object_id): + """ + Parse a protected item ARM ID to extract components. + + Args: + target_object_id (str): The protected item ARM ID + + Returns: + tuple: (resource_group_name, vault_name, protected_item_name) + + Raises: + CLIError: If the protected item ID format is invalid + """ + if not target_object_id: + raise CLIError( + "The --target-object-id parameter is required.") + + # Expected format: /subscriptions/{sub}/resourceGroups/{rg}/providers/ + # Microsoft.DataReplication/replicationVaults/{vault}/ + # protectedItems/{item} + try: + protected_item_id_parts = target_object_id.split("/") + if len(protected_item_id_parts) < 11: + raise ValueError("Invalid protected item ID format") + + resource_group_name = protected_item_id_parts[4] + vault_name = protected_item_id_parts[8] + protected_item_name = protected_item_id_parts[10] + + return resource_group_name, vault_name, protected_item_name + + except (IndexError, ValueError) as e: + raise CLIError( + f"Invalid target object ID format: {target_object_id}. " + "Expected format: /subscriptions/{{subscription-id}}/" + "resourceGroups/{{resource-group}}/providers/" + "Microsoft.DataReplication/replicationVaults/{{vault-name}}/" + f"protectedItems/{{item-name}}. Error: {str(e)}" + ) + + +def extract_job_name_from_operation(operation_location): + """ + Extract job name from the operation location header. + + Args: + operation_location (str): The operation location URL from response headers + + Returns: + str or None: The job name if found, otherwise None + """ + if not operation_location: + return None + + # Extract job name from the operation location + # Format: .../jobs/{jobName}?... 
or .../jobs/{jobName} + job_parts = operation_location.split('/') + job_name = None + for i, part in enumerate(job_parts): + if part == 'jobs' and i + 1 < len(job_parts): + # Get the job name and remove query string if present + job_name = job_parts[i + 1].split('?')[0] + break + + return job_name diff --git a/src/migrate/azext_migrate/helpers/replication/remove/_validate.py b/src/migrate/azext_migrate/helpers/replication/remove/_validate.py new file mode 100644 index 00000000000..d7ffb673e94 --- /dev/null +++ b/src/migrate/azext_migrate/helpers/replication/remove/_validate.py @@ -0,0 +1,72 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +""" +Validation utilities for Azure Migrate replication removal. +""" + +from knack.util import CLIError +from knack.log import get_logger + +logger = get_logger(__name__) + + +def validate_protected_item(cmd, target_object_id): + """ + Validate that the protected item exists and can be removed. + + Args: + cmd: The CLI command context + target_object_id (str): The protected item ARM ID + + Returns: + dict: The protected item resource + + Raises: + CLIError: If the protected item is not found or cannot be removed + """ + from azext_migrate.helpers._utils import ( + get_resource_by_id, + APIVersion + ) + + logger.info( + "Validating protected item '%s'", + target_object_id) + + try: + protected_item = get_resource_by_id( + cmd, + target_object_id, + APIVersion.Microsoft_DataReplication.value + ) + + if not protected_item: + raise CLIError( + f"Replication item is not found with Id " + f"'{target_object_id}'.") + + # Check if the protected item allows DisableProtection operation + properties = protected_item.get('properties', {}) + allowed_jobs = properties.get('allowedJobs', []) + + if "DisableProtection" not in allowed_jobs: + protection_state = properties.get( + 'protectionStateDescription', 'Unknown') + raise CLIError( + f"Replication item with Id '{target_object_id}' cannot " + f"be removed at this moment. 
Current protection state is " + f"'{protection_state}'.") + + return protected_item + + except CLIError: + raise + except Exception as e: + logger.error( + "Error retrieving protected item '%s': %s", + target_object_id, str(e)) + raise CLIError( + f"Failed to retrieve replication item: {str(e)}") diff --git a/src/migrate/azext_migrate/tests/latest/test_migrate_commands.py b/src/migrate/azext_migrate/tests/latest/test_migrate_commands.py index d05a286fea1..016c3b3e54e 100644 --- a/src/migrate/azext_migrate/tests/latest/test_migrate_commands.py +++ b/src/migrate/azext_migrate/tests/latest/test_migrate_commands.py @@ -63,7 +63,7 @@ def _create_mock_cmd(self, command_name='migrate local get-discovered-server'): return mock_cmd @mock.patch( - 'azext_migrate._get_discovered_server_helpers.fetch_all_servers') + 'azext_migrate.helpers._server.fetch_all_servers') @mock.patch( 'azure.cli.core.commands.client_factory.get_subscription_id') def test_get_discovered_server_list_all(self, mock_get_sub_id, @@ -84,10 +84,10 @@ def test_get_discovered_server_list_all(self, mock_get_sub_id, mock_cmd = self._create_mock_cmd() # Execute the command - result = get_discovered_server( + get_discovered_server( cmd=mock_cmd, project_name=self.mock_project_name, - resource_group_name=self.mock_rg_name + resource_group=self.mock_rg_name ) # Verify the fetch_all_servers was called correctly @@ -100,7 +100,7 @@ def test_get_discovered_server_list_all(self, mock_get_sub_id, self.assertIn('/machines?', request_uri) @mock.patch( - 'azext_migrate._get_discovered_server_helpers.fetch_all_servers') + 'azext_migrate.helpers._server.fetch_all_servers') @mock.patch( 'azure.cli.core.commands.client_factory.get_subscription_id') def test_get_discovered_server_with_display_name_filter( @@ -117,10 +117,10 @@ def test_get_discovered_server_with_display_name_filter( mock_cmd = self._create_mock_cmd() - result = get_discovered_server( + get_discovered_server( cmd=mock_cmd, project_name=self.mock_project_name, - resource_group_name=self.mock_rg_name, + resource_group=self.mock_rg_name, display_name=target_display_name ) @@ -130,7 +130,7 @@ def test_get_discovered_server_with_display_name_filter( self.assertIn(target_display_name, call_args[0][1]) @mock.patch( - 'azext_migrate._get_discovered_server_helpers.fetch_all_servers') + 'azext_migrate.helpers._server.fetch_all_servers') @mock.patch( 'azure.cli.core.commands.client_factory.get_subscription_id') def test_get_discovered_server_with_appliance_vmware( @@ -147,7 +147,7 @@ def test_get_discovered_server_with_appliance_vmware( get_discovered_server( cmd=mock_cmd, project_name=self.mock_project_name, - resource_group_name=self.mock_rg_name, + resource_group=self.mock_rg_name, appliance_name=self.mock_appliance_name, source_machine_type="VMware" ) @@ -158,7 +158,7 @@ def test_get_discovered_server_with_appliance_vmware( self.assertIn(self.mock_appliance_name, call_args[0][1]) @mock.patch( - 'azext_migrate._get_discovered_server_helpers.fetch_all_servers') + 'azext_migrate.helpers._server.fetch_all_servers') @mock.patch( 'azure.cli.core.commands.client_factory.get_subscription_id') def test_get_discovered_server_with_appliance_hyperv( @@ -172,10 +172,10 @@ def test_get_discovered_server_with_appliance_hyperv( mock_cmd = self._create_mock_cmd() - result = get_discovered_server( + get_discovered_server( cmd=mock_cmd, project_name=self.mock_project_name, - resource_group_name=self.mock_rg_name, + resource_group=self.mock_rg_name, appliance_name=self.mock_appliance_name, 
source_machine_type="HyperV" ) @@ -186,7 +186,7 @@ def test_get_discovered_server_with_appliance_hyperv( self.assertIn(self.mock_appliance_name, call_args[0][1]) @mock.patch( - 'azext_migrate._get_discovered_server_helpers.fetch_all_servers') + 'azext_migrate.helpers._server.fetch_all_servers') @mock.patch( 'azure.cli.core.commands.client_factory.get_subscription_id') def test_get_discovered_server_specific_machine( @@ -201,10 +201,10 @@ def test_get_discovered_server_specific_machine( mock_cmd = self._create_mock_cmd() - result = get_discovered_server( + get_discovered_server( cmd=mock_cmd, project_name=self.mock_project_name, - resource_group_name=self.mock_rg_name, + resource_group=self.mock_rg_name, name=specific_name ) @@ -213,7 +213,7 @@ def test_get_discovered_server_specific_machine( self.assertIn(f"/machines/{specific_name}?", call_args[0][1]) @mock.patch( - 'azext_migrate._get_discovered_server_helpers.fetch_all_servers') + 'azext_migrate.helpers._server.fetch_all_servers') @mock.patch( 'azure.cli.core.commands.client_factory.get_subscription_id') def test_get_discovered_server_with_pagination(self, mock_get_sub_id, @@ -234,10 +234,10 @@ def test_get_discovered_server_with_pagination(self, mock_get_sub_id, get_discovered_server( cmd=mock_cmd, project_name=self.mock_project_name, - resource_group_name=self.mock_rg_name + resource_group=self.mock_rg_name ) - # Verify fetch_all_servers was called once + # Verify fetch_all_servers was called once # (the pagination logic is handled inside fetch_all_servers) mock_fetch_servers.assert_called_once() @@ -251,7 +251,7 @@ def test_get_discovered_server_missing_project_name(self): get_discovered_server( cmd=mock_cmd, project_name=None, - resource_group_name=self.mock_rg_name + resource_group=self.mock_rg_name ) self.assertIn("project_name", str(context.exception)) @@ -266,7 +266,7 @@ def test_get_discovered_server_missing_resource_group(self): get_discovered_server( cmd=mock_cmd, project_name=self.mock_project_name, - resource_group_name=None + resource_group=None ) self.assertIn("resource_group_name", str(context.exception)) @@ -281,7 +281,7 @@ def test_get_discovered_server_invalid_machine_type(self): get_discovered_server( cmd=mock_cmd, project_name=self.mock_project_name, - resource_group_name=self.mock_rg_name, + resource_group=self.mock_rg_name, source_machine_type="InvalidType" ) @@ -433,12 +433,12 @@ def _create_mock_dra(self, appliance_name, instance_type): @mock.patch( 'azure.cli.core.commands.client_factory.get_mgmt_service_client') @mock.patch( - 'azext_migrate._helpers.' + 'azext_migrate.helpers._utils.' 
'create_or_update_resource') @mock.patch( - 'azext_migrate._get_discovered_server_helpers.fetch_all_servers') + 'azext_migrate.helpers._server.fetch_all_servers') @mock.patch( - 'azext_migrate._helpers.get_resource_by_id') + 'azext_migrate.helpers._utils.get_resource_by_id') @mock.patch( 'azure.cli.core.commands.client_factory.get_subscription_id') @mock.patch('time.sleep') @@ -514,7 +514,7 @@ def test_initialize_replication_infrastructure_success( with self.assertRaises(Exception): initialize_replication_infrastructure( cmd=mock_cmd, - resource_group_name=self.mock_rg_name, + resource_group=self.mock_rg_name, project_name=self.mock_project_name, source_appliance_name=self.mock_source_appliance, target_appliance_name=self.mock_target_appliance @@ -536,7 +536,7 @@ def test_initialize_replication_missing_resource_group(self): with self.assertRaises((CLIError, KnackCLIError)) as context: initialize_replication_infrastructure( cmd=mock_cmd, - resource_group_name=None, + resource_group=None, project_name=self.mock_project_name, source_appliance_name=self.mock_source_appliance, target_appliance_name=self.mock_target_appliance @@ -554,7 +554,7 @@ def test_initialize_replication_missing_project_name(self): with self.assertRaises((CLIError, KnackCLIError)) as context: initialize_replication_infrastructure( cmd=mock_cmd, - resource_group_name=self.mock_rg_name, + resource_group=self.mock_rg_name, project_name=None, source_appliance_name=self.mock_source_appliance, target_appliance_name=self.mock_target_appliance @@ -572,7 +572,7 @@ def test_initialize_replication_missing_source_appliance(self): with self.assertRaises((CLIError, KnackCLIError)) as context: initialize_replication_infrastructure( cmd=mock_cmd, - resource_group_name=self.mock_rg_name, + resource_group=self.mock_rg_name, project_name=self.mock_project_name, source_appliance_name=None, target_appliance_name=self.mock_target_appliance @@ -590,7 +590,7 @@ def test_initialize_replication_missing_target_appliance(self): with self.assertRaises((CLIError, KnackCLIError)) as context: initialize_replication_infrastructure( cmd=mock_cmd, - resource_group_name=self.mock_rg_name, + resource_group=self.mock_rg_name, project_name=self.mock_project_name, source_appliance_name=self.mock_source_appliance, target_appliance_name=None @@ -648,7 +648,7 @@ def test_new_replication_missing_machine_identifier(self): source_appliance_name="source-appliance", target_appliance_name="target-appliance" ) - except (CLIError, KnackCLIError, Exception) as e: + except (CLIError, KnackCLIError, Exception): # Expected to fail # Either machine_id or machine_index should be provided pass @@ -666,7 +666,7 @@ def test_new_replication_machine_index_without_project(self): machine_id=None, machine_index=1, project_name=None, # Missing - resource_group_name=None, # Missing + resource_group=None, # Missing target_storage_path_id=("/subscriptions/sub/resourceGroups" "/rg/providers/" "Microsoft.AzureStackHCI" @@ -677,14 +677,14 @@ def test_new_replication_machine_index_without_project(self): source_appliance_name="source-appliance", target_appliance_name="target-appliance" ) - except (CLIError, KnackCLIError, Exception) as e: + except (CLIError, KnackCLIError, Exception): # Expected to fail pass @mock.patch( - 'azext_migrate._helpers.send_get_request') + 'azext_migrate.helpers._utils.send_get_request') @mock.patch( - 'azext_migrate._helpers.get_resource_by_id') + 'azext_migrate.helpers._utils.get_resource_by_id') @mock.patch( 
'azure.cli.core.commands.client_factory.get_subscription_id') def test_new_replication_with_machine_index(self, @@ -741,7 +741,7 @@ def test_new_replication_with_machine_index(self, machine_id=None, machine_index=1, project_name=self.mock_project_name, - resource_group_name=self.mock_rg_name, + resource_group=self.mock_rg_name, target_storage_path_id=("/subscriptions/sub/resourceGroups/" "rg/providers/" "Microsoft.AzureStackHCI/" @@ -771,8 +771,8 @@ def test_new_replication_with_machine_index(self, else: # If mocks weren't called, ensure we got some expected exception # indicating the function at least tried to execute - self.assertIsNotNone(exception_caught, - "Function should have either called mocks or raised an exception") + self.assertIsNotNone(exception_caught, + "Function should have either called mocks or raised an exception") def test_new_replication_required_parameters_default_mode(self): """Test that required parameters for default user mode are @@ -805,7 +805,7 @@ def test_new_replication_required_parameters_default_mode(self): try: new_local_server_replication(**required_params) - except Exception as e: + except Exception: # Expected to fail at later stages pass @@ -836,7 +836,7 @@ def test_new_replication_required_parameters_power_user_mode(self): try: new_local_server_replication(**required_params) - except Exception as e: + except Exception: # Expected to fail at later stages pass diff --git a/src/migrate/linter_exclusions.yml b/src/migrate/linter_exclusions.yml index b1dbdc0142f..5d1f7b924c9 100644 --- a/src/migrate/linter_exclusions.yml +++ b/src/migrate/linter_exclusions.yml @@ -42,3 +42,15 @@ migrate local replication new: resource_group_name: rule_exclusions: - parameter_should_not_end_in_resource_group + +migrate local replication get-job: + rule_exclusions: + - missing_command_test_coverage + - missing_parameter_test_coverage + - missing_command_example + +migrate local replication remove: + rule_exclusions: + - missing_command_test_coverage + - missing_parameter_test_coverage + - missing_command_example diff --git a/src/migrate/setup.py b/src/migrate/setup.py index fd59f7e0608..c44c6199365 100644 --- a/src/migrate/setup.py +++ b/src/migrate/setup.py @@ -7,7 +7,7 @@ from setuptools import setup, find_packages -VERSION = "2.0.1b1" +VERSION = "3.0.0b1" CLASSIFIERS = [ 'Development Status :: 4 - Beta',
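A few illustrative sketches of the helper logic introduced above follow; every resource name, ID and payload in them is invented. First, the discovery solution's extendedDetails carry the appliance-to-site mapping in two JSON encodings (applianceNameToSiteIdMapV2 as a list of ApplianceName/SiteId pairs, V3 usually keyed by appliance name). A minimal normalization sketch, covering only the dict-shaped V3 payload:

import json


def build_appliance_site_map(extended_details):
    """Flatten the V2/V3 appliance-to-site maps into one dict keyed by
    appliance name in both its original and lowercase forms."""
    app_map = {}

    # V2: JSON-encoded list of {"ApplianceName": ..., "SiteId": ...} entries.
    raw_v2 = extended_details.get('applianceNameToSiteIdMapV2')
    if raw_v2:
        try:
            for item in json.loads(raw_v2):
                if isinstance(item, dict) and 'ApplianceName' in item and 'SiteId' in item:
                    app_map[item['ApplianceName']] = item['SiteId']
                    app_map[item['ApplianceName'].lower()] = item['SiteId']
        except (json.JSONDecodeError, TypeError):
            pass

    # V3: JSON-encoded dict of name -> {"SiteId": ...} or name -> site ID string.
    # (List-shaped V3 payloads are omitted here for brevity.)
    raw_v3 = extended_details.get('applianceNameToSiteIdMapV3')
    if raw_v3:
        try:
            parsed = json.loads(raw_v3)
            items = parsed.items() if isinstance(parsed, dict) else []
            for name, site_info in items:
                site_id = site_info.get('SiteId') if isinstance(site_info, dict) else site_info
                if isinstance(site_id, str):
                    app_map[name] = site_id
                    app_map[name.lower()] = site_id
        except (json.JSONDecodeError, TypeError):
            pass

    return app_map


# Hypothetical extendedDetails payload, shaped like the discovery solution's.
sample_details = {
    'applianceNameToSiteIdMapV2': json.dumps([
        {'ApplianceName': 'MyVMwareAppliance',
         'SiteId': '/subscriptions/xxx/resourceGroups/rg/providers/'
                   'Microsoft.OffAzure/VMwareSites/site1'}
    ])
}
site_map = build_appliance_site_map(sample_details)
print(site_map['myvmwareappliance'])  # case-insensitive lookup works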
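The machine_index path lists every machine in the resolved OffAzure site, following nextLink pages, and then treats the index as 1-based. A small sketch with a stand-in pager (list_page is hypothetical, not part of the extension):

def select_machine_by_index(list_page, first_page_url, machine_index):
    """Collect machines across all pages, then pick the 1-based entry.
    list_page stands in for the ARM GET helper and must return a dict
    with 'value' and an optional 'nextLink'."""
    page = list_page(first_page_url)
    machines = list(page.get('value', []))
    while page.get('nextLink'):
        page = list_page(page['nextLink'])
        machines.extend(page.get('value', []))

    if not isinstance(machine_index, int) or machine_index < 1:
        raise ValueError("machine_index must be a positive integer (1-based).")
    if machine_index > len(machines):
        raise ValueError(
            f"Invalid machine_index {machine_index}. "
            f"Only {len(machines)} machines found.")
    return machines[machine_index - 1]


# Two fake pages to show the 1-based indexing and nextLink handling.
pages = {
    'page1': {'value': [{'id': 'm1'}, {'id': 'm2'}], 'nextLink': 'page2'},
    'page2': {'value': [{'id': 'm3'}]},
}
print(select_machine_by_index(pages.get, 'page1', 3))  # -> {'id': 'm3'}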
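When a Microsoft.Migrate project machine ID is supplied, it is resolved to the underlying OffAzure machine ARM ID via the discoveryData entries. A condensed sketch of that lookup order (sdsArmId, then machineArmId, then machineId, then the top-level properties), using an invented payload:

def resolve_offazure_machine_id(migrate_machine):
    """Return the OffAzure machine ARM ID referenced by a Migrate project
    machine, or None when no reference can be found."""
    props = migrate_machine.get('properties', {})
    for data in props.get('discoveryData', []):
        if data.get('osType'):
            info = data.get('extendedInfo', {})
            machine_id = (info.get('sdsArmId') or
                          info.get('machineArmId') or
                          info.get('machineId'))
            if machine_id:
                return machine_id
    return props.get('machineId') or props.get('machineArmId')


# Invented Migrate project machine payload.
sample_machine = {'properties': {'discoveryData': [
    {'osType': 'linuxguest',
     'extendedInfo': {'sdsArmId': '/subscriptions/xxx/resourceGroups/rg/'
                                  'providers/Microsoft.OffAzure/VMwareSites/'
                                  'site1/machines/machine-12345'}}]}}
print(resolve_offazure_machine_id(sample_machine))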
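The new-replication validation distinguishes power user mode (explicit disk and NIC lists) from default user mode (target switch plus OS disk) and rejects mixtures of the two parameter sets. A compact restatement of that rule:

def check_parameter_mode(disk_to_include, nic_to_include,
                         target_virtual_switch_id, os_disk_id):
    """Return True for power user mode, False for default user mode;
    raise when the two parameter sets are mixed or incomplete."""
    power_user = disk_to_include is not None or nic_to_include is not None
    default_user = (target_virtual_switch_id is not None or
                    os_disk_id is not None)

    if power_user and default_user:
        raise ValueError(
            "Cannot mix default user mode parameters "
            "(target_virtual_switch_id, os_disk_id) with power user mode "
            "parameters (disk_to_include, nic_to_include).")
    if power_user and not (disk_to_include and nic_to_include):
        raise ValueError(
            "Power user mode requires both disk_to_include and nic_to_include.")
    if not power_user and not (target_virtual_switch_id and os_disk_id):
        raise ValueError(
            "Default user mode requires both target_virtual_switch_id "
            "and os_disk_id.")
    return power_user


print(check_parameter_mode(None, None, "switch-id", "disk-0"))  # -> False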
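Several validators rely on fixed segment positions after splitting an ARM ID on '/'; because the ID starts with a slash, index 0 is empty, which is why the resource group lands at index 4, the vault (or site) at index 8 and the protected item (or machine) at index 10. A quick check with a made-up protected item ID:

protected_item_id = (
    "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRG"
    "/providers/Microsoft.DataReplication/replicationVaults/myVault"
    "/protectedItems/myServer")

parts = protected_item_id.split("/")
assert len(parts) >= 11          # the validators reject shorter IDs
resource_group_name = parts[4]   # myRG
vault_name = parts[8]            # myVault
protected_item_name = parts[10]  # myServer
# The same positions hold for OffAzure machine IDs: parts[7] is the site
# type (HyperVSites/VMwareSites), parts[8] the site name and parts[10]
# the machine name.
print(resource_group_name, vault_name, protected_item_name)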
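validate_replication_extension looks the extension up by a name derived from the two fabric resource names; a short sketch of the convention, with fabricated fabric IDs:

def replication_extension_name(source_fabric_id, target_fabric_id):
    """The extension created by 'replication init' is addressed by a name
    built from the two fabric resource names."""
    source_short = source_fabric_id.split('/')[-1]
    target_short = target_fabric_id.split('/')[-1]
    return f"{source_short}-{target_short}-MigReplicationExtn"


print(replication_extension_name(
    "/subscriptions/xxx/resourceGroups/rg/providers/Microsoft.DataReplication"
    "/replicationFabrics/srcFabric",
    "/subscriptions/xxx/resourceGroups/rg/providers/Microsoft.DataReplication"
    "/replicationFabrics/tgtFabric"))
# -> srcFabric-tgtFabric-MigReplicationExtn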
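Target VM names are limited to 1-64 characters; the exact character-set regex is truncated in this diff, so the pattern below is only an assumed stand-in (alphanumeric start, then alphanumerics and hyphens), not the extension's actual rule:

import re


def check_target_vm_name(target_vm_name):
    """Length must be 1-64; the pattern is an illustrative stand-in for the
    truncated regex in the source."""
    if not 0 < len(target_vm_name) <= 64:
        raise ValueError(
            "The target virtual machine name must be between 1 and 64 "
            "characters long.")
    if not re.fullmatch(r"[^_\W][a-zA-Z0-9\-]{0,63}", target_vm_name):
        raise ValueError("The target VM name contains unsupported characters.")


check_target_vm_name("migratedVM01")  # passes silently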
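After the DELETE request, the removal job name is pulled from the Azure-AsyncOperation (or Location) response header; the URL below is fabricated purely to show the .../jobs/{jobName}?... shape the parser expects:

def job_name_from_operation(operation_location):
    """Return the {jobName} segment from an operation URL, or None."""
    if not operation_location:
        return None
    parts = operation_location.split('/')
    for i, part in enumerate(parts):
        if part == 'jobs' and i + 1 < len(parts):
            return parts[i + 1].split('?')[0]  # strip any query string
    return None


url = ("https://management.azure.com/subscriptions/xxx/resourceGroups/myRG"
       "/providers/Microsoft.DataReplication/replicationVaults/myVault"
       "/jobs/abc123?api-version=preview")
print(job_name_from_operation(url))  # -> abc123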
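Finally, a protected item is only removed while 'DisableProtection' appears in its allowedJobs; a sketch of that precondition check, with invented item payloads:

def can_disable_protection(protected_item):
    """Removal is only attempted when 'DisableProtection' is listed in the
    item's allowedJobs; otherwise the current protection state is surfaced."""
    properties = protected_item.get('properties', {})
    if "DisableProtection" in properties.get('allowedJobs', []):
        return True, None
    return False, properties.get('protectionStateDescription', 'Unknown')


# Invented protected item payloads.
ready = {'properties': {'allowedJobs': ['DisableProtection', 'PlannedFailover']}}
busy = {'properties': {'allowedJobs': ['CancelJob'],
                       'protectionStateDescription': 'InitialReplicationInProgress'}}
print(can_disable_protection(ready))  # (True, None)
print(can_disable_protection(busy))   # (False, 'InitialReplicationInProgress')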