From fbcfed7c9179661335bc640440dd8dcc14d55717 Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Fri, 7 Nov 2025 10:40:19 +0530 Subject: [PATCH 01/29] NFS Cloudstack volume and export policy utils --- .../driver/OntapPrimaryDatastoreDriver.java | 4 +- .../storage/feign/client/NASFeignClient.java | 30 +-- .../feign/client/VolumeFeignClient.java | 12 +- .../storage/feign/model/FileInfo.java | 0 .../storage/feign/model/LunMap.java | 0 .../cloudstack/storage/feign/model/Qos.java | 0 .../OntapPrimaryDatastoreLifecycle.java | 36 +++- .../storage/provider/OntapHostListener.java | 37 ++++ .../OntapPrimaryDatastoreProvider.java | 5 +- .../provider/StorageProviderFactory.java | 2 +- .../storage/service/StorageStrategy.java | 25 ++- .../storage/service/UnifiedNASStrategy.java | 187 +++++++++++++++++- .../storage/service/model/AccessGroup.java | 29 +++ .../service/model/CloudStackVolume.java | 16 ++ .../storage/service/model/ProtocolType.java | 2 +- .../cloudstack/storage/utils/Constants.java | 2 + .../cloudstack/storage/utils/Utility.java | 112 +++++++---- 17 files changed, 430 insertions(+), 69 deletions(-) mode change 100644 => 100755 plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/FileInfo.java mode change 100644 => 100755 plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/LunMap.java mode change 100644 => 100755 plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Qos.java create mode 100644 plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index e2eb6220230a..bf027c6a1466 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -129,10 +129,12 @@ private String createCloudStackVolumeForTypeVolume(DataStore dataStore, DataObje Map details = storagePoolDetailsDao.listDetailsKeyPairs(dataStore.getId()); StorageStrategy storageStrategy = getStrategyByStoragePoolDetails(details); s_logger.info("createCloudStackVolumeForTypeVolume: Connection to Ontap SVM [{}] successful, preparing CloudStackVolumeRequest", details.get(Constants.SVM_NAME)); - CloudStackVolume cloudStackVolumeRequest = Utility.createCloudStackVolumeRequestByProtocol(storagePool, details, dataObject); + CloudStackVolume cloudStackVolumeRequest = Utility.createCloudStackVolumeRequestByProtocol(storagePool, details, (VolumeInfo) dataObject); CloudStackVolume cloudStackVolume = storageStrategy.createCloudStackVolume(cloudStackVolumeRequest); if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL)) && cloudStackVolume.getLun() != null && cloudStackVolume.getLun().getName() != null) { return cloudStackVolume.getLun().getName(); + } else if (ProtocolType.NFS.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { + return cloudStackVolume.getFile().getName(); } else { String errMsg = "createCloudStackVolumeForTypeVolume: Volume creation failed. 
Lun or Lun Path is null for dataObject: " + dataObject; s_logger.error(errMsg); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java index b7aac9954cfe..339962cad25e 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java @@ -26,39 +26,39 @@ import feign.Param; import feign.RequestLine; -//TODO: Proper URLs should be added in the RequestLine annotations below public interface NASFeignClient { // File Operations @RequestLine("GET /{volumeUuid}/files/{path}") @Headers({"Authorization: {authHeader}"}) OntapResponse getFileResponse(@Param("authHeader") String authHeader, - @Param("volumeUuid") String volumeUUID, - @Param("path") String filePath); + @Param("volumeUuid") String volumeUUID, + @Param("path") String filePath); @RequestLine("DELETE /{volumeUuid}/files/{path}") @Headers({"Authorization: {authHeader}"}) void deleteFile(@Param("authHeader") String authHeader, - @Param("volumeUuid") String volumeUUID, - @Param("path") String filePath); + @Param("volumeUuid") String volumeUUID, + @Param("path") String filePath); @RequestLine("PATCH /{volumeUuid}/files/{path}") @Headers({"Authorization: {authHeader}"}) void updateFile(@Param("authHeader") String authHeader, - @Param("volumeUuid") String volumeUUID, - @Param("path") String filePath, FileInfo fileInfo); + @Param("volumeUuid") String volumeUUID, + @Param("path") String filePath, + FileInfo fileInfo); @RequestLine("POST /{volumeUuid}/files/{path}") @Headers({"Authorization: {authHeader}"}) void createFile(@Param("authHeader") String authHeader, - @Param("volumeUuid") String volumeUUID, - @Param("path") String filePath, FileInfo file); + @Param("volumeUuid") String volumeUUID, + @Param("path") String filePath, + FileInfo file); // Export Policy Operations @RequestLine("POST /") - @Headers({"Authorization: {authHeader}", "return_records: {returnRecords}"}) + @Headers({"Authorization: {authHeader}"}) ExportPolicy createExportPolicy(@Param("authHeader") String authHeader, - @Param("returnRecords") boolean returnRecords, ExportPolicy exportPolicy); @RequestLine("GET /") @@ -68,16 +68,16 @@ ExportPolicy createExportPolicy(@Param("authHeader") String authHeader, @RequestLine("GET /{id}") @Headers({"Authorization: {authHeader}"}) OntapResponse getExportPolicyById(@Param("authHeader") String authHeader, - @Param("id") String id); + @Param("id") String id); @RequestLine("DELETE /{id}") @Headers({"Authorization: {authHeader}"}) void deleteExportPolicyById(@Param("authHeader") String authHeader, - @Param("id") String id); + @Param("id") String id); @RequestLine("PATCH /{id}") @Headers({"Authorization: {authHeader}"}) OntapResponse updateExportPolicy(@Param("authHeader") String authHeader, - @Param("id") String id, - ExportPolicy request); + @Param("id") String id, + ExportPolicy request); } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java index 9a2c76639221..cdb898ad0ae1 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java +++ 
b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java @@ -18,11 +18,15 @@ */ package org.apache.cloudstack.storage.feign.client; +import feign.QueryMap; import org.apache.cloudstack.storage.feign.model.Volume; import org.apache.cloudstack.storage.feign.model.response.JobResponse; import feign.Headers; import feign.Param; import feign.RequestLine; +import org.apache.cloudstack.storage.feign.model.response.OntapResponse; + +import java.util.Map; public interface VolumeFeignClient { @@ -38,8 +42,12 @@ public interface VolumeFeignClient { @Headers({"Authorization: {authHeader}"}) Volume getVolumeByUUID(@Param("authHeader") String authHeader, @Param("uuid") String uuid); + @RequestLine("GET /api/storage/volumes") + @Headers({"Authorization: {authHeader}"}) + OntapResponse getVolume(@Param("authHeader") String authHeader, @QueryMap Map queryMap); + @RequestLine("PATCH /api/storage/volumes/{uuid}") - @Headers({"Accept: {acceptHeader}", "Authorization: {authHeader}"}) - JobResponse updateVolumeRebalancing(@Param("acceptHeader") String acceptHeader, @Param("uuid") String uuid, Volume volumeRequest); + @Headers({ "Authorization: {authHeader}"}) + JobResponse updateVolumeRebalancing(@Param("authHeader") String authHeader, @Param("uuid") String uuid, Volume volumeRequest); } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/FileInfo.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/FileInfo.java old mode 100644 new mode 100755 diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/LunMap.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/LunMap.java old mode 100644 new mode 100755 diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Qos.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Qos.java old mode 100644 new mode 100755 diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index 01b013f606dd..e25fa7af3644 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -40,10 +40,13 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import org.apache.cloudstack.storage.datastore.lifecycle.BasePrimaryDataStoreLifeCycleImpl; import org.apache.cloudstack.storage.feign.model.OntapStorage; +import org.apache.cloudstack.storage.feign.model.Volume; import org.apache.cloudstack.storage.provider.StorageProviderFactory; import org.apache.cloudstack.storage.service.StorageStrategy; +import org.apache.cloudstack.storage.service.model.AccessGroup; import org.apache.cloudstack.storage.service.model.ProtocolType; import org.apache.cloudstack.storage.utils.Constants; +import org.apache.cloudstack.storage.utils.Utility; import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -184,7 +187,7 @@ public DataStore initialize(Map dsInfos) { String path; ProtocolType protocol = 
ProtocolType.valueOf(details.get(Constants.PROTOCOL)); switch (protocol) { - case NFS3: + case NFS: parameters.setType(Storage.StoragePoolType.NetworkFilesystem); path = details.get(Constants.MANAGEMENT_LIF) + ":/" + storagePoolName; s_logger.info("Setting NFS path for storage pool: " + path); @@ -213,7 +216,9 @@ public DataStore initialize(Map dsInfos) { long volumeSize = Long.parseLong(details.get(Constants.SIZE)); s_logger.info("Creating ONTAP volume '" + storagePoolName + "' with size: " + volumeSize + " bytes (" + (volumeSize / (1024 * 1024 * 1024)) + " GB)"); - storageStrategy.createStorageVolume(storagePoolName, volumeSize); + Volume volume = storageStrategy.createStorageVolume(storagePoolName, volumeSize); + details.put(Constants.VOLUME_UUID, volume.getUuid()); + details.put(Constants.VOLUME_NAME, volume.getName()); } else { throw new CloudRuntimeException("ONTAP details validation failed, cannot create primary storage"); } @@ -241,10 +246,20 @@ public DataStore initialize(Map dsInfos) { @Override public boolean attachCluster(DataStore dataStore, ClusterScope scope) { logger.debug("In attachCluster for ONTAP primary storage"); - PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo)dataStore; - List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primarystore); + PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)dataStore; + List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primaryStore); - logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primarystore.getClusterId())); + logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primaryStore.getClusterId())); + + Map details = primaryStore.getDetails(); // TODO check while testing , if it is populated we can remove below db call + StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details); + AccessGroup accessGroupRequest = new AccessGroup(); + accessGroupRequest.setHostsToConnect(hostsToConnect); + accessGroupRequest.setScope(scope); + accessGroupRequest.setPrimaryDataStoreInfo(primaryStore); + strategy.createAccessGroup(accessGroupRequest); + + logger.debug("attachCluster: Attaching the pool to each of the host in the cluster: {}", primaryStore.getClusterId()); for (HostVO host : hostsToConnect) { // TODO: Fetch the host IQN and add to the initiator group on ONTAP cluster try { @@ -265,9 +280,18 @@ public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo exis @Override public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.HypervisorType hypervisorType) { logger.debug("In attachZone for ONTAP primary storage"); - List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), Hypervisor.HypervisorType.KVM); + PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)dataStore; + List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), Hypervisor.HypervisorType.KVM); logger.debug(String.format("In createPool. 
Attaching the pool to each of the hosts in %s.", hostsToConnect)); + + Map details = primaryStore.getDetails(); // TODO check while testing , if it is populated we can remove below db call + StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details); + AccessGroup accessGroupRequest = new AccessGroup(); + accessGroupRequest.setHostsToConnect(hostsToConnect); + accessGroupRequest.setScope(scope); + accessGroupRequest.setPrimaryDataStoreInfo(primaryStore); + strategy.createAccessGroup(accessGroupRequest); for (HostVO host : hostsToConnect) { // TODO: Fetch the host IQN and add to the initiator group on ONTAP cluster try { diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java new file mode 100644 index 000000000000..beec2edabdff --- /dev/null +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java @@ -0,0 +1,37 @@ +package org.apache.cloudstack.storage.provider; + +import com.cloud.exception.StorageConflictException; +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; + +class OntapHostListener implements HypervisorHostListener { + + @Override + public boolean hostAdded(long hostId) { + return false; + } + + @Override + public boolean hostConnect(long hostId, long poolId) throws StorageConflictException { + return false; + } + + @Override + public boolean hostDisconnected(long hostId, long poolId) { + return false; + } + + @Override + public boolean hostAboutToBeRemoved(long hostId) { + return false; + } + + @Override + public boolean hostRemoved(long hostId, long clusterId) { + return false; + } + + @Override + public boolean hostEnabled(long hostId) { + return false; + } +} \ No newline at end of file diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java index fa2f14692c77..75d6f6310512 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java @@ -41,6 +41,7 @@ public class OntapPrimaryDatastoreProvider implements PrimaryDataStoreProvider { private static final Logger s_logger = LogManager.getLogger(OntapPrimaryDatastoreProvider.class); private OntapPrimaryDatastoreDriver primaryDatastoreDriver; private OntapPrimaryDatastoreLifecycle primaryDatastoreLifecycle; + private OntapHostListener ontapHostListener; public OntapPrimaryDatastoreProvider() { s_logger.info("OntapPrimaryDatastoreProvider initialized"); @@ -57,7 +58,7 @@ public DataStoreDriver getDataStoreDriver() { @Override public HypervisorHostListener getHostListener() { - return null; + return ontapHostListener; } @Override @@ -71,6 +72,8 @@ public boolean configure(Map params) { s_logger.trace("OntapPrimaryDatastoreProvider: configure: Called"); primaryDatastoreDriver = ComponentContext.inject(OntapPrimaryDatastoreDriver.class); primaryDatastoreLifecycle = ComponentContext.inject(OntapPrimaryDatastoreLifecycle.class); + ontapHostListener = ComponentContext.inject(OntapHostListener.class); + return true; } diff --git 
a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java index 6bb6ad1fef73..e9448ec16ded 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java @@ -36,7 +36,7 @@ public static StorageStrategy getStrategy(OntapStorage ontapStorage) { ProtocolType protocol = ontapStorage.getProtocol(); s_logger.info("Initializing StorageProviderFactory with protocol: " + protocol); switch (protocol) { - case NFS3: + case NFS: if (!ontapStorage.getIsDisaggregated()) { UnifiedNASStrategy unifiedNASStrategy = new UnifiedNASStrategy(ontapStorage); unifiedNASStrategy.setOntapStorage(ontapStorage); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java index 0f9706335784..37310a1467c2 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java @@ -156,8 +156,11 @@ public Volume createStorageVolume(String volumeName, Long size) { volumeRequest.setSize(size); // Make the POST API call to create the volume try { - // Create URI for POST CreateVolume API - // Call the VolumeFeignClient to create the volume + /* + ONTAP creates a default 0.0.0.0 rule if no export rules are defined while creating the volume, + and since CloudStack is not aware of the hosts at storage pool creation time, we can either create a default or + permissive rule here and update it later as part of the attachCluster or attachZone implementation + */ JobResponse jobResponse = volumeFeignClient.createVolumeWithJob(authHeader, volumeRequest); if (jobResponse == null || jobResponse.getJob() == null) { throw new CloudRuntimeException("Failed to initiate volume creation for " + volumeName); @@ -192,8 +195,20 @@ public Volume createStorageVolume(String volumeName, Long size) { throw new CloudRuntimeException("Failed to create volume: " + e.getMessage()); } s_logger.info("Volume created successfully: " + volumeName); - //TODO - return null; + // Below code is to update volume uuid to storage pool mapping once and used for all other workflow saving get volume call + OntapResponse ontapVolume = new OntapResponse<>(); + try { + Map queryParams = Map.of(Constants.NAME, volumeName); + ontapVolume = volumeFeignClient.getVolume(authHeader, queryParams); + if ((ontapVolume == null || ontapVolume.getRecords().isEmpty())) { + s_logger.error("Exception while getting volume volume not found:"); + throw new CloudRuntimeException("Failed to fetch volume " + volumeName); + } + }catch (Exception e) { + s_logger.error("Exception while getting volume: " + e.getMessage()); + throw new CloudRuntimeException("Failed to fetch volume: " + e.getMessage()); + } + return ontapVolume.getRecords().get(0); } /** @@ -287,7 +302,7 @@ public Volume getStorageVolume(Volume volume) * @param accessGroup the access group to create * @return the created AccessGroup object */ - abstract AccessGroup createAccessGroup(AccessGroup accessGroup); + abstract public AccessGroup createAccessGroup(AccessGroup accessGroup); /** * Method encapsulates 
the behavior based on the opted protocol in subclasses diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java index cb3079691c94..cd6631c119bb 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java @@ -19,23 +19,32 @@ package org.apache.cloudstack.storage.service; +import com.cloud.utils.exception.CloudRuntimeException; +import feign.FeignException; import org.apache.cloudstack.storage.feign.FeignClientFactory; import org.apache.cloudstack.storage.feign.client.NASFeignClient; +import org.apache.cloudstack.storage.feign.client.VolumeFeignClient; +import org.apache.cloudstack.storage.feign.model.ExportPolicy; +import org.apache.cloudstack.storage.feign.model.FileInfo; +import org.apache.cloudstack.storage.feign.model.Nas; import org.apache.cloudstack.storage.feign.model.OntapStorage; +import org.apache.cloudstack.storage.feign.model.Svm; +import org.apache.cloudstack.storage.feign.model.Volume; +import org.apache.cloudstack.storage.feign.model.response.OntapResponse; import org.apache.cloudstack.storage.service.model.AccessGroup; import org.apache.cloudstack.storage.service.model.CloudStackVolume; import org.apache.cloudstack.storage.utils.Constants; +import org.apache.cloudstack.storage.utils.Utility; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; - import java.util.Map; public class UnifiedNASStrategy extends NASStrategy { private static final Logger s_logger = LogManager.getLogger(UnifiedNASStrategy.class); - // Add missing Feign client setup for NAS operations private final FeignClientFactory feignClientFactory; private final NASFeignClient nasFeignClient; + private final VolumeFeignClient volumeFeignClient; public UnifiedNASStrategy(OntapStorage ontapStorage) { super(ontapStorage); @@ -43,6 +52,7 @@ public UnifiedNASStrategy(OntapStorage ontapStorage) { // Initialize FeignClientFactory and create NAS client this.feignClientFactory = new FeignClientFactory(); this.nasFeignClient = feignClientFactory.createClient(NASFeignClient.class, baseURL); + this.volumeFeignClient = feignClientFactory.createClient(VolumeFeignClient.class, baseURL); } public void setOntapStorage(OntapStorage ontapStorage) { @@ -51,8 +61,17 @@ public void setOntapStorage(OntapStorage ontapStorage) { @Override public CloudStackVolume createCloudStackVolume(CloudStackVolume cloudstackVolume) { - //TODO: Implement NAS volume creation using nasFeignClient - return null; + s_logger.info("createCloudStackVolume: Create cloudstack volume " + cloudstackVolume); + try { + createFile(cloudstackVolume.getVolume().getUuid(),cloudstackVolume.getCloudstackVolName(), cloudstackVolume.getFile()); + s_logger.debug("Successfully created file in ONTAP under volume with path {} or name {} ", cloudstackVolume.getVolume().getUuid(), cloudstackVolume.getCloudstackVolName()); + FileInfo responseFile = cloudstackVolume.getFile(); + responseFile.setPath(cloudstackVolume.getCloudstackVolName()); + }catch (Exception e) { + s_logger.error("Exception occurred while creating file or dir: {}. 
Exception: {}", cloudstackVolume.getCloudstackVolName(), e.getMessage()); + throw new CloudRuntimeException("Failed to create file: " + e.getMessage()); + } + return cloudstackVolume; } @Override @@ -104,4 +123,164 @@ void enableLogicalAccess(Map values) { void disableLogicalAccess(Map values) { //TODO } + + + private ExportPolicy createExportPolicy(String svmName, String policyName) { + s_logger.info("Creating export policy: {} for SVM: {}", policyName, svmName); + + try { + String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); + + // Create ExportPolicy object + ExportPolicy exportPolicy = new ExportPolicy(); + exportPolicy.setName(policyName); + + // Set SVM + Svm svm = new Svm(); + svm.setName(svmName); + exportPolicy.setSvm(svm); + + // Create export policy + ExportPolicy createdPolicy = nasFeignClient.createExportPolicy(authHeader, exportPolicy); + + if (createdPolicy != null && createdPolicy.getId() != null) { + s_logger.info("Export policy created successfully with ID: {}", createdPolicy.getId()); + return createdPolicy; + } else { + throw new CloudRuntimeException("Failed to create export policy: " + policyName); + } + + } catch (FeignException e) { + s_logger.error("Failed to create export policy: {}", policyName, e); + throw new CloudRuntimeException("Failed to create export policy: " + e.getMessage()); + } catch (Exception e) { + s_logger.error("Exception while creating export policy: {}", policyName, e); + throw new CloudRuntimeException("Failed to create export policy: " + e.getMessage()); + } + } + + + private void deleteExportPolicy(String svmName, String policyName) { + try { + String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); + OntapResponse policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader); + + if (policiesResponse.getRecords() == null || policiesResponse.getRecords().isEmpty()) { + s_logger.warn("Export policy not found for deletion: {}", policyName); + throw new CloudRuntimeException("Export policy not found : " + policyName); + } + String policyId = policiesResponse.getRecords().get(0).getId().toString(); + nasFeignClient.deleteExportPolicyById(authHeader, policyId); + s_logger.info("Export policy deleted successfully: {}", policyName); + } catch (Exception e) { + s_logger.error("Failed to delete export policy: {}", policyName, e); + throw new CloudRuntimeException("Failed to delete export policy: " + policyName); + } + } + + + private String addExportRule(String policyName, String clientMatch, String[] protocols, String[] roRule, String[] rwRule) { + return ""; + } + + private String assignExportPolicyToVolume(String volumeUuid, String policyName) { + s_logger.info("Assigning export policy: {} to volume: {}", policyName, volumeUuid); + + try { + String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); + OntapResponse policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader); + if (policiesResponse.getRecords() == null || policiesResponse.getRecords().isEmpty()) { + throw new CloudRuntimeException("Export policy not found: " + policyName); + } + ExportPolicy exportPolicy = policiesResponse.getRecords().get(0); + // Create Volume update object with NAS configuration + Volume volumeUpdate = new Volume(); + Nas nas = new Nas(); + nas.setExportPolicy(exportPolicy); + volumeUpdate.setNas(nas); + + volumeFeignClient.updateVolumeRebalancing(authHeader, volumeUuid, volumeUpdate); + s_logger.info("Export policy successfully 
assigned to volume: {}", volumeUuid); + return "Export policy " + policyName + " assigned to volume " + volumeUuid; + + } catch (FeignException e) { + s_logger.error("Failed to assign export policy to volume: {}", volumeUuid, e); + throw new CloudRuntimeException("Failed to assign export policy: " + e.getMessage()); + } catch (Exception e) { + s_logger.error("Exception while assigning export policy to volume: {}", volumeUuid, e); + throw new CloudRuntimeException("Failed to assign export policy: " + e.getMessage()); + } + } + + private boolean createFile(String volumeUuid, String filePath, FileInfo fileInfo) { + s_logger.info("Creating file: {} in volume: {}", filePath, volumeUuid); + + try { + String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); + nasFeignClient.createFile(authHeader, volumeUuid, filePath, fileInfo); + s_logger.info("File created successfully: {} in volume: {}", filePath, volumeUuid); + return true; + } catch (FeignException e) { + s_logger.error("Failed to create file: {} in volume: {}", filePath, volumeUuid, e); + return false; + } catch (Exception e) { + s_logger.error("Exception while creating file: {} in volume: {}", filePath, volumeUuid, e); + return false; + } + } + + private boolean deleteFile(String volumeUuid, String filePath) { + s_logger.info("Deleting file: {} from volume: {}", filePath, volumeUuid); + + try { + String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); + nasFeignClient.deleteFile(authHeader, volumeUuid, filePath); + s_logger.info("File deleted successfully: {} from volume: {}", filePath, volumeUuid); + return true; + } catch (FeignException e) { + s_logger.error("Failed to delete file: {} from volume: {}", filePath, volumeUuid, e); + return false; + } catch (Exception e) { + s_logger.error("Exception while deleting file: {} from volume: {}", filePath, volumeUuid, e); + return false; + } + } + + private OntapResponse getFileInfo(String volumeUuid, String filePath) { + s_logger.debug("Getting file info for: {} in volume: {}", filePath, volumeUuid); + + try { + String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); + OntapResponse response = nasFeignClient.getFileResponse(authHeader, volumeUuid, filePath); + s_logger.debug("Retrieved file info for: {} in volume: {}", filePath, volumeUuid); + return response; + } catch (FeignException e) { + if (e.status() == 404) { + s_logger.debug("File not found: {} in volume: {}", filePath, volumeUuid); + return null; + } + s_logger.error("Failed to get file info: {} in volume: {}", filePath, volumeUuid, e); + throw new CloudRuntimeException("Failed to get file info: " + e.getMessage()); + } catch (Exception e) { + s_logger.error("Exception while getting file info: {} in volume: {}", filePath, volumeUuid, e); + throw new CloudRuntimeException("Failed to get file info: " + e.getMessage()); + } + } + + private boolean updateFile(String volumeUuid, String filePath, FileInfo fileInfo) { + s_logger.info("Updating file: {} in volume: {}", filePath, volumeUuid); + + try { + String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); + nasFeignClient.updateFile( authHeader, volumeUuid, filePath, fileInfo); + s_logger.info("File updated successfully: {} in volume: {}", filePath, volumeUuid); + return true; + } catch (FeignException e) { + s_logger.error("Failed to update file: {} in volume: {}", filePath, volumeUuid, e); + return false; + } catch (Exception e) { + 
s_logger.error("Exception while updating file: {} in volume: {}", filePath, volumeUuid, e); + return false; + } + } } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java index c4dfce7ce51c..a3467cc30d2d 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java @@ -19,14 +19,24 @@ package org.apache.cloudstack.storage.service.model; +import com.cloud.host.HostVO; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.storage.feign.model.ExportPolicy; import org.apache.cloudstack.storage.feign.model.Igroup; +import java.util.List; + public class AccessGroup { private Igroup igroup; private ExportPolicy exportPolicy; + private List hostsToConnect; + private PrimaryDataStoreInfo primaryDataStoreInfo; + private Scope scope; + + public Igroup getIgroup() { return igroup; } @@ -42,4 +52,23 @@ public ExportPolicy getPolicy() { public void setPolicy(ExportPolicy policy) { this.exportPolicy = policy; } + public List getHostsToConnect() { + return hostsToConnect; + } + public void setHostsToConnect(List hostsToConnect) { + this.hostsToConnect = hostsToConnect; + } + public PrimaryDataStoreInfo getPrimaryDataStoreInfo() { + return primaryDataStoreInfo; + } + public void setPrimaryDataStoreInfo(PrimaryDataStoreInfo primaryDataStoreInfo) { + this.primaryDataStoreInfo = primaryDataStoreInfo; + } + public Scope getScope() { + return scope; + } + public void setScope(Scope scope) { + this.scope = scope; + } + } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java index a7f5d8659d03..694c4a2c126f 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java @@ -21,11 +21,15 @@ import org.apache.cloudstack.storage.feign.model.FileInfo; import org.apache.cloudstack.storage.feign.model.Lun; +import org.apache.cloudstack.storage.feign.model.Volume; public class CloudStackVolume { private FileInfo file; private Lun lun; + private Volume volume; + // will be replaced after testing + private String cloudstackVolName; public FileInfo getFile() { return file; @@ -42,4 +46,16 @@ public Lun getLun() { public void setLun(Lun lun) { this.lun = lun; } + public Volume getVolume() { + return volume; + } + public void setVolume(Volume volume) { + this.volume = volume; + } + public String getCloudstackVolName() { + return cloudstackVolName; + } + public void setCloudstackVolName(String cloudstackVolName) { + this.cloudstackVolName = cloudstackVolName; + } } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/ProtocolType.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/ProtocolType.java index 00dca62480dc..47b55ec29bb7 100644 --- 
a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/ProtocolType.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/ProtocolType.java @@ -20,6 +20,6 @@ package org.apache.cloudstack.storage.service.model; public enum ProtocolType { - NFS3, + NFS, ISCSI } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java index b58e8484cd48..a81fdb0a8ab5 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java @@ -31,6 +31,8 @@ public class Constants { public static final String MANAGEMENT_LIF = "managementLIF"; public static final String IS_DISAGGREGATED = "isDisaggregated"; public static final String RUNNING = "running"; + public static final String VOLUME_UUID = "volumeUUID"; + public static final String VOLUME_NAME = "volumeNAME"; public static final int ONTAP_PORT = 443; diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java index af48724f984c..cd02cbf10481 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java @@ -21,11 +21,16 @@ import com.cloud.utils.StringUtils; import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.feign.model.FileInfo; import org.apache.cloudstack.storage.feign.model.Lun; import org.apache.cloudstack.storage.feign.model.LunSpace; +import org.apache.cloudstack.storage.feign.model.OntapStorage; import org.apache.cloudstack.storage.feign.model.Svm; +import org.apache.cloudstack.storage.feign.model.Volume; +import org.apache.cloudstack.storage.provider.StorageProviderFactory; +import org.apache.cloudstack.storage.service.StorageStrategy; import org.apache.cloudstack.storage.service.model.CloudStackVolume; import org.apache.cloudstack.storage.service.model.ProtocolType; import org.apache.logging.log4j.LogManager; @@ -53,41 +58,82 @@ public static String generateAuthHeader (String username, String password) { return BASIC + StringUtils.SPACE + new String(encodedBytes); } - public static CloudStackVolume createCloudStackVolumeRequestByProtocol(StoragePoolVO storagePool, Map details, DataObject dataObject) { - CloudStackVolume cloudStackVolumeRequest = null; + public static CloudStackVolume createCloudStackVolumeRequestByProtocol(StoragePoolVO storagePool, Map details, VolumeInfo volumeObject) { + CloudStackVolume cloudStackVolumeRequest = null; - String protocol = details.get(Constants.PROTOCOL); - if (ProtocolType.ISCSI.name().equalsIgnoreCase(protocol)) { - cloudStackVolumeRequest = new CloudStackVolume(); - Lun lunRequest = new Lun(); - Svm svm = new Svm(); - svm.setName(details.get(Constants.SVM_NAME)); - lunRequest.setSvm(svm); + String protocol = details.get(Constants.PROTOCOL); + ProtocolType protocolType = ProtocolType.valueOf(protocol); + switch (protocolType) { + case 
NFS: + // TODO add logic for NFS file creation + cloudStackVolumeRequest = new CloudStackVolume(); + FileInfo file = new FileInfo(); + //file.setName("test1"); // to be replaced with volume name // this should not be passed for dir + //file.setName(volumeObject.getName()); // to check whether this needs to be sent or not + file.setSize(Long.parseLong("10000")); + file.setSize(volumeObject.getSize()); + file.setUnixPermissions(755); // check if it is needed only for dir ? it is needed for dir + file.setType(FileInfo.TypeEnum.DIRECTORY); // We are creating file for a cloudstack volume . Should it be dir ? // TODO change once multipart is done - LunSpace lunSpace = new LunSpace(); - lunSpace.setSize(dataObject.getSize()); - lunRequest.setSpace(lunSpace); - //Lun name is full path like in unified "/vol/VolumeName/LunName" - String lunFullName = Constants.VOLUME_PATH_PREFIX + storagePool.getName() + Constants.PATH_SEPARATOR + dataObject.getName(); - lunRequest.setName(lunFullName); + Volume poolVolume = new Volume(); + poolVolume.setName(details.get(Constants.VOLUME_NAME)); + poolVolume.setUuid(details.get(Constants.VOLUME_UUID)); + cloudStackVolumeRequest.setVolume(poolVolume); + cloudStackVolumeRequest.setFile(file); + cloudStackVolumeRequest.setCloudstackVolName(volumeObject.getName()); + break; + case ISCSI: + cloudStackVolumeRequest = new CloudStackVolume(); + Lun lunRequest = new Lun(); + Svm svm = new Svm(); + svm.setName(details.get(Constants.SVM_NAME)); + lunRequest.setSvm(svm); - String hypervisorType = storagePool.getHypervisor().name(); - String osType = null; - switch (hypervisorType) { - case Constants.KVM: - osType = Lun.OsTypeEnum.LINUX.getValue(); - break; - default: - String errMsg = "createCloudStackVolume : Unsupported hypervisor type " + hypervisorType + " for ONTAP storage"; - s_logger.error(errMsg); - throw new CloudRuntimeException(errMsg); - } - lunRequest.setOsType(Lun.OsTypeEnum.valueOf(osType)); + LunSpace lunSpace = new LunSpace(); + lunSpace.setSize(volumeObject.getSize()); + lunRequest.setSpace(lunSpace); + //Lun name is full path like in unified "/vol/VolumeName/LunName" + String lunFullName = Constants.VOLUME_PATH_PREFIX + storagePool.getName() + Constants.PATH_SEPARATOR + volumeObject.getName(); + lunRequest.setName(lunFullName); - cloudStackVolumeRequest.setLun(lunRequest); - return cloudStackVolumeRequest; - } else { - throw new CloudRuntimeException("createCloudStackVolumeRequestByProtocol: Unsupported protocol " + protocol); - } + String hypervisorType = storagePool.getHypervisor().name(); + String osType = null; + switch (hypervisorType) { + case Constants.KVM: + osType = Lun.OsTypeEnum.LINUX.getValue(); + break; + default: + String errMsg = "createCloudStackVolume : Unsupported hypervisor type " + hypervisorType + " for ONTAP storage"; + s_logger.error(errMsg); + throw new CloudRuntimeException(errMsg); + } + lunRequest.setOsType(Lun.OsTypeEnum.valueOf(osType)); + cloudStackVolumeRequest.setLun(lunRequest); + break; + default: + throw new CloudRuntimeException("createCloudStackVolumeRequestByProtocol: Unsupported protocol " + protocol); + + } + return cloudStackVolumeRequest; + } + + public static StorageStrategy getStrategyByStoragePoolDetails(Map details) { + if (details == null || details.isEmpty()) { + s_logger.error("getStrategyByStoragePoolDetails: Storage pool details are null or empty"); + throw new CloudRuntimeException("getStrategyByStoragePoolDetails: Storage pool details are null or empty"); + } + String protocol = 
details.get(Constants.PROTOCOL); + OntapStorage ontapStorage = new OntapStorage(details.get(Constants.USERNAME), details.get(Constants.PASSWORD), + details.get(Constants.MANAGEMENT_LIF), details.get(Constants.SVM_NAME), ProtocolType.valueOf(protocol), + Boolean.parseBoolean(details.get(Constants.IS_DISAGGREGATED))); + StorageStrategy storageStrategy = StorageProviderFactory.getStrategy(ontapStorage); + boolean isValid = storageStrategy.connect(); + if (isValid) { + s_logger.info("Connection to Ontap SVM [{}] successful", details.get(Constants.SVM_NAME)); + return storageStrategy; + } else { + s_logger.error("getStrategyByStoragePoolDetails: Connection to Ontap SVM [" + details.get(Constants.SVM_NAME) + "] failed"); + throw new CloudRuntimeException("getStrategyByStoragePoolDetails: Connection to Ontap SVM [" + details.get(Constants.SVM_NAME) + "] failed"); + } } } From ee2197f34e9f5c1fc5819d3fdd5c14893a5b0350 Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Fri, 7 Nov 2025 11:38:56 +0530 Subject: [PATCH 02/29] Add license headers in files --- .../storage/feign/FeignConfiguration.java | 19 +++++++++++++++++++ .../storage/provider/OntapHostListener.java | 19 +++++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java index ce2783add228..fc4d3484506b 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java @@ -1,3 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + package org.apache.cloudstack.storage.feign; import feign.RequestInterceptor; diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java index beec2edabdff..2f1a81da468a 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java @@ -1,3 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + package org.apache.cloudstack.storage.provider; import com.cloud.exception.StorageConflictException; From d14a23e275f37e02068fd5d781843d9e08f7d8ee Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Mon, 10 Nov 2025 15:18:34 +0530 Subject: [PATCH 03/29] accessgroup create recode --- plugins/storage/volume/ontap/pom.xml | 3 +- .../driver/OntapPrimaryDatastoreDriver.java | 1 - .../storage/feign/FeignConfiguration.java | 41 ++++++++-- .../OntapPrimaryDatastoreLifecycle.java | 31 +++++++- .../storage/provider/OntapHostListener.java | 74 ++++++++++--------- .../OntapPrimaryDatastoreProvider.java | 6 +- .../storage/service/StorageStrategy.java | 36 ++++++--- .../storage/service/UnifiedNASStrategy.java | 63 ++++++++++++---- 8 files changed, 177 insertions(+), 78 deletions(-) mode change 100644 => 100755 plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java mode change 100644 => 100755 plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java diff --git a/plugins/storage/volume/ontap/pom.xml b/plugins/storage/volume/ontap/pom.xml index 10ca7935f408..3da605f12f9d 100644 --- a/plugins/storage/volume/ontap/pom.xml +++ b/plugins/storage/volume/ontap/pom.xml @@ -31,7 +31,6 @@ 2021.0.7 11.0 20230227 - 2.15.2 4.5.14 1.6.2 3.8.1 @@ -77,7 +76,7 @@ com.fasterxml.jackson.core jackson-databind - ${jackson-databind.version} + 2.13.4 org.apache.httpcomponents diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index bf027c6a1466..979e761eb4b0 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -179,7 +179,6 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore @Override public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) { - } @Override diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java index fc4d3484506b..e9c504e8de71 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java @@ -30,7 +30,7 @@ import feign.codec.EncodeException; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.DeserializationFeature; -import com.fasterxml.jackson.databind.json.JsonMapper; +import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.http.conn.ConnectionKeepAliveStrategy; import org.apache.http.conn.ssl.NoopHostnameVerifier; import 
org.apache.http.conn.ssl.SSLConnectionSocketFactory; @@ -55,13 +55,11 @@ public class FeignConfiguration { private final int retryMaxInterval = 5; private final String ontapFeignMaxConnection = "80"; private final String ontapFeignMaxConnectionPerRoute = "20"; - private final JsonMapper jsonMapper; + private final ObjectMapper jsonMapper; public FeignConfiguration() { - this.jsonMapper = JsonMapper.builder() - .disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES) - .findAndAddModules() - .build(); + this.jsonMapper = new ObjectMapper(); + this.jsonMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); } public Client createClient() { @@ -139,16 +137,43 @@ public Decoder createDecoder() { @Override public Object decode(Response response, Type type) throws IOException, DecodeException { if (response.body() == null) { + logger.debug("Response body is null, returning null"); return null; } String json = null; try (InputStream bodyStream = response.body().asInputStream()) { json = new String(bodyStream.readAllBytes(), StandardCharsets.UTF_8); logger.debug("Decoding JSON response: {}", json); - return jsonMapper.readValue(json, jsonMapper.getTypeFactory().constructType(type)); + logger.debug("Target type: {}", type); + logger.debug("About to call jsonMapper.readValue()..."); + + Object result = null; + try { + logger.debug("Calling jsonMapper.constructType()..."); + var javaType = jsonMapper.getTypeFactory().constructType(type); + logger.debug("constructType() returned: {}", javaType); + + logger.debug("Calling jsonMapper.readValue() with json and javaType..."); + result = jsonMapper.readValue(json, javaType); + logger.debug("jsonMapper.readValue() completed successfully"); + } catch (Throwable ex) { + logger.error("EXCEPTION in jsonMapper.readValue()! Type: {}, Message: {}", ex.getClass().getName(), ex.getMessage(), ex); + throw ex; + } + + if (result == null) { + logger.warn("Decoded result is null!"); + } else { + logger.debug("Successfully decoded to object of type: {}", result.getClass().getName()); + } + logger.debug("Returning result from decoder"); + return result; } catch (IOException e) { - logger.error("Error decoding JSON response. Status: {}, Raw body: {}", response.status(), json, e); + logger.error("IOException during decoding. Status: {}, Raw body: {}", response.status(), json, e); throw new DecodeException(response.status(), "Error decoding JSON response", response.request(), e); + } catch (Exception e) { + logger.error("Unexpected error during decoding. 
Status: {}, Type: {}, Raw body: {}", response.status(), type, json, e); + throw new DecodeException(response.status(), "Unexpected error during decoding", response.request(), e); } } }; diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java old mode 100644 new mode 100755 index e25fa7af3644..784f7ab6ed02 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -38,8 +38,11 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.lifecycle.BasePrimaryDataStoreLifeCycleImpl; +import org.apache.cloudstack.storage.feign.model.ExportPolicy; import org.apache.cloudstack.storage.feign.model.OntapStorage; +import org.apache.cloudstack.storage.feign.model.Svm; import org.apache.cloudstack.storage.feign.model.Volume; import org.apache.cloudstack.storage.provider.StorageProviderFactory; import org.apache.cloudstack.storage.service.StorageStrategy; @@ -62,6 +65,7 @@ public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycl @Inject private StorageManager _storageMgr; @Inject private ResourceManager _resourceMgr; @Inject private PrimaryDataStoreHelper _dataStoreHelper; + @Inject private StoragePoolDetailsDao storagePoolDetailsDao; private static final Logger s_logger = LogManager.getLogger(OntapPrimaryDatastoreLifecycle.class); // ONTAP minimum volume size is 1.56 GB (1677721600 bytes) @@ -216,9 +220,21 @@ public DataStore initialize(Map dsInfos) { long volumeSize = Long.parseLong(details.get(Constants.SIZE)); s_logger.info("Creating ONTAP volume '" + storagePoolName + "' with size: " + volumeSize + " bytes (" + (volumeSize / (1024 * 1024 * 1024)) + " GB)"); - Volume volume = storageStrategy.createStorageVolume(storagePoolName, volumeSize); - details.put(Constants.VOLUME_UUID, volume.getUuid()); - details.put(Constants.VOLUME_NAME, volume.getName()); + try { + Volume volume = storageStrategy.createStorageVolume(storagePoolName, volumeSize); + if (volume == null) { + s_logger.error("createStorageVolume returned null for volume: " + storagePoolName); + throw new CloudRuntimeException("Failed to create ONTAP volume: " + storagePoolName); + } + + s_logger.info("Volume object retrieved successfully. UUID: " + volume.getUuid() + ", Name: " + volume.getName()); + + details.putIfAbsent(Constants.VOLUME_UUID, volume.getUuid()); + details.putIfAbsent(Constants.VOLUME_NAME, volume.getName()); + } catch (Exception e) { + s_logger.error("Exception occurred while creating ONTAP volume: " + storagePoolName, e); + throw new CloudRuntimeException("Failed to create ONTAP volume: " + storagePoolName + ". 
Error: " + e.getMessage(), e); + } } else { throw new CloudRuntimeException("ONTAP details validation failed, cannot create primary storage"); } @@ -249,14 +265,21 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)dataStore; List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primaryStore); + logger.debug(" datastore object received is {} ",primaryStore ); + logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primaryStore.getClusterId())); - Map details = primaryStore.getDetails(); // TODO check while testing , if it is populated we can remove below db call + Map details = storagePoolDetailsDao.listDetailsKeyPairs(primaryStore.getId()); StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details); + Svm svm = new Svm(); + svm.setName(details.get(Constants.SVM_NAME)); + ExportPolicy exportPolicy = new ExportPolicy(); + exportPolicy.setSvm(svm); AccessGroup accessGroupRequest = new AccessGroup(); accessGroupRequest.setHostsToConnect(hostsToConnect); accessGroupRequest.setScope(scope); accessGroupRequest.setPrimaryDataStoreInfo(primaryStore); + accessGroupRequest.setPolicy(exportPolicy); strategy.createAccessGroup(accessGroupRequest); logger.debug("attachCluster: Attaching the pool to each of the host in the cluster: {}", primaryStore.getClusterId()); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java old mode 100644 new mode 100755 index 2f1a81da468a..3fb6406aa9cc --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java @@ -18,39 +18,41 @@ */ package org.apache.cloudstack.storage.provider; - -import com.cloud.exception.StorageConflictException; -import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; - -class OntapHostListener implements HypervisorHostListener { - - @Override - public boolean hostAdded(long hostId) { - return false; - } - - @Override - public boolean hostConnect(long hostId, long poolId) throws StorageConflictException { - return false; - } - - @Override - public boolean hostDisconnected(long hostId, long poolId) { - return false; - } - - @Override - public boolean hostAboutToBeRemoved(long hostId) { - return false; - } - - @Override - public boolean hostRemoved(long hostId, long clusterId) { - return false; - } - - @Override - public boolean hostEnabled(long hostId) { - return false; - } -} \ No newline at end of file +// +//import com.cloud.exception.StorageConflictException; +//import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +// +//public class OntapHostListener implements HypervisorHostListener { +// +// public OntapHostListener(){} +// +// @Override +// public boolean hostAdded(long hostId) { +// return false; +// } +// +// @Override +// public boolean hostConnect(long hostId, long poolId) throws StorageConflictException { +// return false; +// } +// +// @Override +// public boolean hostDisconnected(long hostId, long poolId) { +// return false; +// } +// +// @Override +// public boolean hostAboutToBeRemoved(long hostId) { +// return false; +// } +// +// @Override +// public boolean 
hostRemoved(long hostId, long clusterId) { +// return false; +// } +// +// @Override +// public boolean hostEnabled(long hostId) { +// return false; +// } +//} \ No newline at end of file diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java index 75d6f6310512..d954d1d413cf 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java @@ -41,7 +41,7 @@ public class OntapPrimaryDatastoreProvider implements PrimaryDataStoreProvider { private static final Logger s_logger = LogManager.getLogger(OntapPrimaryDatastoreProvider.class); private OntapPrimaryDatastoreDriver primaryDatastoreDriver; private OntapPrimaryDatastoreLifecycle primaryDatastoreLifecycle; - private OntapHostListener ontapHostListener; + // private HypervisorHostListener listener; public OntapPrimaryDatastoreProvider() { s_logger.info("OntapPrimaryDatastoreProvider initialized"); @@ -58,7 +58,7 @@ public DataStoreDriver getDataStoreDriver() { @Override public HypervisorHostListener getHostListener() { - return ontapHostListener; + return null; } @Override @@ -72,7 +72,7 @@ public boolean configure(Map params) { s_logger.trace("OntapPrimaryDatastoreProvider: configure: Called"); primaryDatastoreDriver = ComponentContext.inject(OntapPrimaryDatastoreDriver.class); primaryDatastoreLifecycle = ComponentContext.inject(OntapPrimaryDatastoreLifecycle.class); - ontapHostListener = ComponentContext.inject(OntapHostListener.class); + // listener = ComponentContext.inject(OntapHostListener.class); return true; } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java index 37310a1467c2..045cf23af67f 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java @@ -196,19 +196,37 @@ public Volume createStorageVolume(String volumeName, Long size) { } s_logger.info("Volume created successfully: " + volumeName); // Below code is to update volume uuid to storage pool mapping once and used for all other workflow saving get volume call - OntapResponse ontapVolume = new OntapResponse<>(); try { Map queryParams = Map.of(Constants.NAME, volumeName); - ontapVolume = volumeFeignClient.getVolume(authHeader, queryParams); - if ((ontapVolume == null || ontapVolume.getRecords().isEmpty())) { - s_logger.error("Exception while getting volume volume not found:"); - throw new CloudRuntimeException("Failed to fetch volume " + volumeName); + s_logger.debug("Fetching volume details for: " + volumeName); + + OntapResponse ontapVolume = volumeFeignClient.getVolume(authHeader, queryParams); + s_logger.debug("Feign call completed. Processing response..."); + + if (ontapVolume == null) { + s_logger.error("OntapResponse is null for volume: " + volumeName); + throw new CloudRuntimeException("Failed to fetch volume " + volumeName + ": Response is null"); + } + s_logger.debug("OntapResponse is not null. 
Checking records field..."); + + if (ontapVolume.getRecords() == null) { + s_logger.error("OntapResponse.records is null for volume: " + volumeName); + throw new CloudRuntimeException("Failed to fetch volume " + volumeName + ": Records list is null"); } - }catch (Exception e) { - s_logger.error("Exception while getting volume: " + e.getMessage()); - throw new CloudRuntimeException("Failed to fetch volume: " + e.getMessage()); + s_logger.debug("Records field is not null. Size: " + ontapVolume.getRecords().size()); + + if (ontapVolume.getRecords().isEmpty()) { + s_logger.error("OntapResponse.records is empty for volume: " + volumeName); + throw new CloudRuntimeException("Failed to fetch volume " + volumeName + ": No records found"); + } + + Volume volume = ontapVolume.getRecords().get(0); + s_logger.info("Volume retrieved successfully: " + volumeName + ", UUID: " + volume.getUuid()); + return volume; + } catch (Exception e) { + s_logger.error("Exception while retrieving volume details for: " + volumeName, e); + throw new CloudRuntimeException("Failed to fetch volume: " + volumeName + ". Error: " + e.getMessage(), e); } - return ontapVolume.getRecords().get(0); } /** diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java index cd6631c119bb..2df144193035 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java @@ -19,12 +19,14 @@ package org.apache.cloudstack.storage.service; +import com.cloud.host.HostVO; import com.cloud.utils.exception.CloudRuntimeException; import feign.FeignException; import org.apache.cloudstack.storage.feign.FeignClientFactory; import org.apache.cloudstack.storage.feign.client.NASFeignClient; import org.apache.cloudstack.storage.feign.client.VolumeFeignClient; import org.apache.cloudstack.storage.feign.model.ExportPolicy; +import org.apache.cloudstack.storage.feign.model.ExportRule; import org.apache.cloudstack.storage.feign.model.FileInfo; import org.apache.cloudstack.storage.feign.model.Nas; import org.apache.cloudstack.storage.feign.model.OntapStorage; @@ -37,6 +39,9 @@ import org.apache.cloudstack.storage.utils.Utility; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; + +import java.util.ArrayList; +import java.util.List; import java.util.Map; public class UnifiedNASStrategy extends NASStrategy { @@ -93,7 +98,35 @@ CloudStackVolume getCloudStackVolume(CloudStackVolume cloudstackVolume) { @Override public AccessGroup createAccessGroup(AccessGroup accessGroup) { - //TODO + + + // Create the export policy + String svmName = accessGroup.getPolicy().getSvm().getName(); + String exportPolicyName = "export-" + svmName + "-" + accessGroup.getPrimaryDataStoreInfo().getName(); + + ExportPolicy exportPolicy = new ExportPolicy(); + exportPolicy.setName(exportPolicyName); + + Svm svm = new Svm(); + svm.setName(svmName); + exportPolicy.setSvm(svm); + + List rules = new ArrayList<>(); + ExportRule exportRule = new ExportRule(); + + List hosts = accessGroup.getHostsToConnect(); + for (HostVO host : hosts) { + host.getStorageIpAddress() + } + + + exportPolicy.setRules(rules); + ExportPolicy createExportPolicy = createExportPolicy(svmName, exportPolicy); + + + + + // attach export policy to volume of storage pool 
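        // Sketch for illustration only (not part of this patch): the ExportPolicy assembled in this
        // method is expected to serialize into a POST against ONTAP's /api/protocols/nfs/export-policies
        // endpoint with a body roughly like the following; the SVM, pool and client values are
        // placeholders, not values taken from this change:
        //   {
        //     "name": "export-svm1-pool1",
        //     "svm": { "name": "svm1" },
        //     "rules": [ {
        //       "clients": [ { "match": "10.0.0.11/32" } ],
        //       "protocols": ["any"], "ro_rule": ["any"], "rw_rule": ["any"]
        //     } ]
        //   }
        // One client entry is intended per eligible host, using its storage IP when set and
        // falling back to its private IP otherwise.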
return null; } @@ -125,36 +158,36 @@ void disableLogicalAccess(Map values) { } - private ExportPolicy createExportPolicy(String svmName, String policyName) { - s_logger.info("Creating export policy: {} for SVM: {}", policyName, svmName); + private ExportPolicy createExportPolicy(String svmName, ExportPolicy policy) { + s_logger.info("Creating export policy: {} for SVM: {}", policy, svmName); try { String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); - // Create ExportPolicy object - ExportPolicy exportPolicy = new ExportPolicy(); - exportPolicy.setName(policyName); - - // Set SVM - Svm svm = new Svm(); - svm.setName(svmName); - exportPolicy.setSvm(svm); +// // Create ExportPolicy object +// ExportPolicy exportPolicy = new ExportPolicy(); +// exportPolicy.setName(policyName); +// +// // Set SVM +// Svm svm = new Svm(); +// svm.setName(svmName); +// exportPolicy.setSvm(svm); // Create export policy - ExportPolicy createdPolicy = nasFeignClient.createExportPolicy(authHeader, exportPolicy); + ExportPolicy createdPolicy = nasFeignClient.createExportPolicy(authHeader, policy); if (createdPolicy != null && createdPolicy.getId() != null) { s_logger.info("Export policy created successfully with ID: {}", createdPolicy.getId()); return createdPolicy; } else { - throw new CloudRuntimeException("Failed to create export policy: " + policyName); + throw new CloudRuntimeException("Failed to create export policy: " + policy); } } catch (FeignException e) { - s_logger.error("Failed to create export policy: {}", policyName, e); + s_logger.error("Failed to create export policy: {}", policy, e); throw new CloudRuntimeException("Failed to create export policy: " + e.getMessage()); } catch (Exception e) { - s_logger.error("Exception while creating export policy: {}", policyName, e); + s_logger.error("Exception while creating export policy: {}", policy, e); throw new CloudRuntimeException("Failed to create export policy: " + e.getMessage()); } } From 419bdb932eb383aeacb34f5222a12c5e192cf21f Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Mon, 10 Nov 2025 19:29:16 +0530 Subject: [PATCH 04/29] creatacessgroup for NFS impl --- .../storage/feign/client/NASFeignClient.java | 6 +- .../storage/feign/model/ExportRule.java | 23 +++ .../OntapPrimaryDatastoreLifecycle.java | 11 +- .../storage/service/UnifiedNASStrategy.java | 147 ++++++++++++------ 4 files changed, 129 insertions(+), 58 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java index 339962cad25e..58280047e3fd 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java @@ -58,16 +58,16 @@ void createFile(@Param("authHeader") String authHeader, // Export Policy Operations @RequestLine("POST /") @Headers({"Authorization: {authHeader}"}) - ExportPolicy createExportPolicy(@Param("authHeader") String authHeader, + void createExportPolicy(@Param("authHeader") String authHeader, ExportPolicy exportPolicy); @RequestLine("GET /") @Headers({"Authorization: {authHeader}"}) - OntapResponse getExportPolicyResponse(@Param("authHeader") String authHeader); + ExportPolicy getExportPolicyResponse(@Param("authHeader") String authHeader); @RequestLine("GET /{id}") 
@Headers({"Authorization: {authHeader}"}) - OntapResponse getExportPolicyById(@Param("authHeader") String authHeader, + ExportPolicy getExportPolicyById(@Param("authHeader") String authHeader, @Param("id") String id); @RequestLine("DELETE /{id}") diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportRule.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportRule.java index 8f3c9597dca7..769f94836b31 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportRule.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportRule.java @@ -76,6 +76,13 @@ public static ProtocolsEnum fromValue(String text) { @JsonProperty("protocols") private List protocols = null; + @JsonProperty("ro_rule") + private List roRule = null; + + @JsonProperty("rw_rule") + private List rwRule = null; + + public ExportRule anonymousUser(String anonymousUser) { this.anonymousUser = anonymousUser; return this; @@ -140,6 +147,22 @@ public void setMatch (String match) { } } + public List getRwRule() { + return rwRule; + } + + public void setRwRule(List rwRule) { + this.rwRule = rwRule; + } + + public List getRoRule() { + return roRule; + } + + public void setRoRule(List roRule) { + this.roRule = roRule; + } + @Override public String toString() { StringBuilder sb = new StringBuilder(); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index 784f7ab6ed02..838aece8201e 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -42,7 +42,6 @@ import org.apache.cloudstack.storage.datastore.lifecycle.BasePrimaryDataStoreLifeCycleImpl; import org.apache.cloudstack.storage.feign.model.ExportPolicy; import org.apache.cloudstack.storage.feign.model.OntapStorage; -import org.apache.cloudstack.storage.feign.model.Svm; import org.apache.cloudstack.storage.feign.model.Volume; import org.apache.cloudstack.storage.provider.StorageProviderFactory; import org.apache.cloudstack.storage.service.StorageStrategy; @@ -271,13 +270,11 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { Map details = storagePoolDetailsDao.listDetailsKeyPairs(primaryStore.getId()); StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details); - Svm svm = new Svm(); - svm.setName(details.get(Constants.SVM_NAME)); ExportPolicy exportPolicy = new ExportPolicy(); - exportPolicy.setSvm(svm); AccessGroup accessGroupRequest = new AccessGroup(); accessGroupRequest.setHostsToConnect(hostsToConnect); accessGroupRequest.setScope(scope); + primaryStore.setDetails(details);// setting details as it does not come from cloudstack accessGroupRequest.setPrimaryDataStoreInfo(primaryStore); accessGroupRequest.setPolicy(exportPolicy); strategy.createAccessGroup(accessGroupRequest); @@ -308,13 +305,17 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), Hypervisor.HypervisorType.KVM); logger.debug(String.format("In createPool. 
Attaching the pool to each of the hosts in %s.", hostsToConnect)); - Map details = primaryStore.getDetails(); // TODO check while testing , if it is populated we can remove below db call + Map details = storagePoolDetailsDao.listDetailsKeyPairs(primaryStore.getId()); StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details); + ExportPolicy exportPolicy = new ExportPolicy(); AccessGroup accessGroupRequest = new AccessGroup(); accessGroupRequest.setHostsToConnect(hostsToConnect); accessGroupRequest.setScope(scope); + primaryStore.setDetails(details); // setting details as it does not come from cloudstack accessGroupRequest.setPrimaryDataStoreInfo(primaryStore); + accessGroupRequest.setPolicy(exportPolicy); strategy.createAccessGroup(accessGroupRequest); + for (HostVO host : hostsToConnect) { // TODO: Fetch the host IQN and add to the initiator group on ONTAP cluster try { diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java index 2df144193035..1212d75903c7 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java @@ -22,16 +22,20 @@ import com.cloud.host.HostVO; import com.cloud.utils.exception.CloudRuntimeException; import feign.FeignException; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.feign.FeignClientFactory; +import org.apache.cloudstack.storage.feign.client.JobFeignClient; import org.apache.cloudstack.storage.feign.client.NASFeignClient; import org.apache.cloudstack.storage.feign.client.VolumeFeignClient; import org.apache.cloudstack.storage.feign.model.ExportPolicy; import org.apache.cloudstack.storage.feign.model.ExportRule; import org.apache.cloudstack.storage.feign.model.FileInfo; +import org.apache.cloudstack.storage.feign.model.Job; import org.apache.cloudstack.storage.feign.model.Nas; import org.apache.cloudstack.storage.feign.model.OntapStorage; import org.apache.cloudstack.storage.feign.model.Svm; import org.apache.cloudstack.storage.feign.model.Volume; +import org.apache.cloudstack.storage.feign.model.response.JobResponse; import org.apache.cloudstack.storage.feign.model.response.OntapResponse; import org.apache.cloudstack.storage.service.model.AccessGroup; import org.apache.cloudstack.storage.service.model.CloudStackVolume; @@ -40,6 +44,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import javax.inject.Inject; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -50,6 +55,9 @@ public class UnifiedNASStrategy extends NASStrategy { private final FeignClientFactory feignClientFactory; private final NASFeignClient nasFeignClient; private final VolumeFeignClient volumeFeignClient; + private final JobFeignClient jobFeignClient; + @Inject + private StoragePoolDetailsDao storagePoolDetailsDao; public UnifiedNASStrategy(OntapStorage ontapStorage) { super(ontapStorage); @@ -58,6 +66,7 @@ public UnifiedNASStrategy(OntapStorage ontapStorage) { this.feignClientFactory = new FeignClientFactory(); this.nasFeignClient = feignClientFactory.createClient(NASFeignClient.class, baseURL); this.volumeFeignClient = feignClientFactory.createClient(VolumeFeignClient.class, baseURL); + this.jobFeignClient = 
feignClientFactory.createClient(JobFeignClient.class, baseURL); } public void setOntapStorage(OntapStorage ontapStorage) { @@ -99,35 +108,54 @@ CloudStackVolume getCloudStackVolume(CloudStackVolume cloudstackVolume) { @Override public AccessGroup createAccessGroup(AccessGroup accessGroup) { - // Create the export policy - String svmName = accessGroup.getPolicy().getSvm().getName(); - String exportPolicyName = "export-" + svmName + "-" + accessGroup.getPrimaryDataStoreInfo().getName(); + Map details = accessGroup.getPrimaryDataStoreInfo().getDetails(); + String svmName = details.get(Constants.SVM_NAME); + String volumeUUID = details.get(Constants.VOLUME_UUID); + String volumeName = details.get(Constants.VOLUME_NAME); + String exportPolicyName = "export-" + svmName + "-" + volumeName;// TODO move this to util ExportPolicy exportPolicy = new ExportPolicy(); - exportPolicy.setName(exportPolicyName); - - Svm svm = new Svm(); - svm.setName(svmName); - exportPolicy.setSvm(svm); List rules = new ArrayList<>(); ExportRule exportRule = new ExportRule(); + List exportClients = new ArrayList<>(); List hosts = accessGroup.getHostsToConnect(); for (HostVO host : hosts) { - host.getStorageIpAddress() + String hostStorageIp = host.getStorageIpAddress(); + String ip = (hostStorageIp != null && !hostStorageIp.isEmpty()) + ? hostStorageIp + : host.getPrivateIpAddress(); + String ipToUse = ip + "/32"; + ExportRule.ExportClient exportClient = new ExportRule.ExportClient(); + exportClient.setMatch(ipToUse); + exportClients.add(exportClient); } + exportRule.setClients(exportClients); + exportRule.setProtocols(List.of(ExportRule.ProtocolsEnum.any)); + exportRule.setRoRule(List.of("any")); + exportRule.setRwRule(List.of("any")); + rules.add(exportRule); - + Svm svm = new Svm(); + svm.setName(svmName); + exportPolicy.setSvm(svm); exportPolicy.setRules(rules); - ExportPolicy createExportPolicy = createExportPolicy(svmName, exportPolicy); - - - - - // attach export policy to volume of storage pool - return null; + exportPolicy.setName(exportPolicyName); + try { + createExportPolicy(svmName, exportPolicy); + s_logger.info("ExportPolicy created: {}, now attaching this policy to storage pool volume", exportPolicy.getName()); + + // attach export policy to volume of storage pool + assignExportPolicyToVolume(volumeUUID,exportPolicy.getName()); + s_logger.info("Successfully assigned exportPolicy {} to volume {}", exportPolicy.getName(), volumeName); + accessGroup.setPolicy(exportPolicy); + return accessGroup; + }catch (Exception e){ + s_logger.error("Exception occurred while creating access group: " + e); + throw new CloudRuntimeException("Failed to create access group: " + e); + } } @Override @@ -158,31 +186,13 @@ void disableLogicalAccess(Map values) { } - private ExportPolicy createExportPolicy(String svmName, ExportPolicy policy) { + private void createExportPolicy(String svmName, ExportPolicy policy) { s_logger.info("Creating export policy: {} for SVM: {}", policy, svmName); try { String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); - -// // Create ExportPolicy object -// ExportPolicy exportPolicy = new ExportPolicy(); -// exportPolicy.setName(policyName); -// -// // Set SVM -// Svm svm = new Svm(); -// svm.setName(svmName); -// exportPolicy.setSvm(svm); - - // Create export policy - ExportPolicy createdPolicy = nasFeignClient.createExportPolicy(authHeader, policy); - - if (createdPolicy != null && createdPolicy.getId() != null) { - s_logger.info("Export policy created 
successfully with ID: {}", createdPolicy.getId()); - return createdPolicy; - } else { - throw new CloudRuntimeException("Failed to create export policy: " + policy); - } - + nasFeignClient.createExportPolicy(authHeader, policy); + s_logger.info("Export policy created successfully with name {}", policy.getName()); } catch (FeignException e) { s_logger.error("Failed to create export policy: {}", policy, e); throw new CloudRuntimeException("Failed to create export policy: " + e.getMessage()); @@ -192,17 +202,16 @@ private ExportPolicy createExportPolicy(String svmName, ExportPolicy policy) { } } - private void deleteExportPolicy(String svmName, String policyName) { try { String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); - OntapResponse policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader); + ExportPolicy policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader); - if (policiesResponse.getRecords() == null || policiesResponse.getRecords().isEmpty()) { + if (policiesResponse == null ) { s_logger.warn("Export policy not found for deletion: {}", policyName); throw new CloudRuntimeException("Export policy not found : " + policyName); } - String policyId = policiesResponse.getRecords().get(0).getId().toString(); + String policyId = policiesResponse.getId().toString(); nasFeignClient.deleteExportPolicyById(authHeader, policyId); s_logger.info("Export policy deleted successfully: {}", policyName); } catch (Exception e) { @@ -216,26 +225,64 @@ private String addExportRule(String policyName, String clientMatch, String[] pro return ""; } - private String assignExportPolicyToVolume(String volumeUuid, String policyName) { + private void assignExportPolicyToVolume(String volumeUuid, String policyName) { s_logger.info("Assigning export policy: {} to volume: {}", policyName, volumeUuid); try { String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); - OntapResponse policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader); - if (policiesResponse.getRecords() == null || policiesResponse.getRecords().isEmpty()) { + ExportPolicy policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader); + if (policiesResponse == null) { throw new CloudRuntimeException("Export policy not found: " + policyName); } - ExportPolicy exportPolicy = policiesResponse.getRecords().get(0); // Create Volume update object with NAS configuration Volume volumeUpdate = new Volume(); Nas nas = new Nas(); - nas.setExportPolicy(exportPolicy); + ExportPolicy policy = new ExportPolicy(); + policy.setName(policiesResponse.getName()); + nas.setExportPolicy(policy); volumeUpdate.setNas(nas); - volumeFeignClient.updateVolumeRebalancing(authHeader, volumeUuid, volumeUpdate); - s_logger.info("Export policy successfully assigned to volume: {}", volumeUuid); - return "Export policy " + policyName + " assigned to volume " + volumeUuid; + try { + /* + ONTAP created a default rule of 0.0.0.0 if no export rule are defined while creating volume + and since in storage pool creation, cloudstack is not aware of the host , we can either create default or + permissive rule and later update it as part of attachCluster or attachZone implementation + */ + JobResponse jobResponse = volumeFeignClient.updateVolumeRebalancing(authHeader, volumeUuid, volumeUpdate); + if (jobResponse == null || jobResponse.getJob() == null) { + throw new CloudRuntimeException("Failed to attach policy " + policiesResponse.getName() + "to volume " + volumeUuid); 
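            // Sketch for illustration only (not part of this patch): the updateVolumeRebalancing call above
            // is expected to issue an ONTAP REST request along these lines (values are placeholders):
            //   PATCH /api/storage/volumes/{volumeUuid}
            //   { "nas": { "export_policy": { "name": "export-svm1-pool1" } } }
            // ONTAP acknowledges the change with a job reference, which is why the code that follows
            // polls the returned job UUID (typically GET /api/cluster/jobs/{uuid}) until the job reports
            // success, fails, or the retry budget is exhausted.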
+ } + String jobUUID = jobResponse.getJob().getUuid(); + + //Create URI for GET Job API + int jobRetryCount = 0; + Job createVolumeJob = null; + while(createVolumeJob == null || !createVolumeJob.getState().equals(Constants.JOB_SUCCESS)) { + if(jobRetryCount >= Constants.JOB_MAX_RETRIES) { + s_logger.error("Job to update volume " + volumeUuid + " did not complete within expected time."); + throw new CloudRuntimeException("Job to update volume " + volumeUuid + " did not complete within expected time."); + } + + try { + createVolumeJob = jobFeignClient.getJobByUUID(authHeader, jobUUID); + if (createVolumeJob == null) { + s_logger.warn("Job with UUID " + jobUUID + " not found. Retrying..."); + } else if (createVolumeJob.getState().equals(Constants.JOB_FAILURE)) { + throw new CloudRuntimeException("Job to update volume " + volumeUuid + " failed with error: " + createVolumeJob.getMessage()); + } + } catch (FeignException.FeignClientException e) { + throw new CloudRuntimeException("Failed to fetch job status: " + e.getMessage()); + } + + jobRetryCount++; + Thread.sleep(Constants.CREATE_VOLUME_CHECK_SLEEP_TIME); // Sleep for 2 seconds before polling again + } + } catch (Exception e) { + s_logger.error("Exception while updating volume: ", e); + throw new CloudRuntimeException("Failed to update volume: " + e.getMessage()); + } + s_logger.info("Export policy successfully assigned to volume: {}", volumeUuid); } catch (FeignException e) { s_logger.error("Failed to assign export policy to volume: {}", volumeUuid, e); throw new CloudRuntimeException("Failed to assign export policy: " + e.getMessage()); From 8e000542528ab462996c5fe5d25f6ce46c68e903 Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Fri, 14 Nov 2025 10:22:28 +0530 Subject: [PATCH 05/29] storage pool mounting on host --- .../storage/feign/client/NASFeignClient.java | 23 +-- .../OntapPrimaryDatastoreLifecycle.java | 9 +- .../storage/listener/OntapHostListener.java | 168 ++++++++++++++++++ .../storage/provider/OntapHostListener.java | 58 ------ .../OntapPrimaryDatastoreProvider.java | 7 +- .../storage/service/StorageStrategy.java | 6 + .../storage/service/UnifiedNASStrategy.java | 122 ++++++++----- .../cloudstack/storage/utils/Constants.java | 2 + 8 files changed, 275 insertions(+), 120 deletions(-) create mode 100644 plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java delete mode 100755 plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java index 58280047e3fd..f48f83dc28de 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.storage.feign.client; +import feign.QueryMap; import org.apache.cloudstack.storage.feign.model.ExportPolicy; import org.apache.cloudstack.storage.feign.model.FileInfo; import org.apache.cloudstack.storage.feign.model.response.OntapResponse; @@ -26,29 +27,31 @@ import feign.Param; import feign.RequestLine; +import java.util.Map; + public interface NASFeignClient { // File Operations - @RequestLine("GET /{volumeUuid}/files/{path}") + @RequestLine("GET 
/api/storage/volumes/{volumeUuid}/files/{path}") @Headers({"Authorization: {authHeader}"}) OntapResponse getFileResponse(@Param("authHeader") String authHeader, @Param("volumeUuid") String volumeUUID, @Param("path") String filePath); - @RequestLine("DELETE /{volumeUuid}/files/{path}") + @RequestLine("DELETE /api/storage/volumes/{volumeUuid}/files/{path}") @Headers({"Authorization: {authHeader}"}) void deleteFile(@Param("authHeader") String authHeader, @Param("volumeUuid") String volumeUUID, @Param("path") String filePath); - @RequestLine("PATCH /{volumeUuid}/files/{path}") + @RequestLine("PATCH /api/storage/volumes/{volumeUuid}/files/{path}") @Headers({"Authorization: {authHeader}"}) void updateFile(@Param("authHeader") String authHeader, @Param("volumeUuid") String volumeUUID, @Param("path") String filePath, FileInfo fileInfo); - @RequestLine("POST /{volumeUuid}/files/{path}") + @RequestLine("POST /api/storage/volumes/{volumeUuid}/files/{path}") @Headers({"Authorization: {authHeader}"}) void createFile(@Param("authHeader") String authHeader, @Param("volumeUuid") String volumeUUID, @@ -56,26 +59,26 @@ void createFile(@Param("authHeader") String authHeader, FileInfo file); // Export Policy Operations - @RequestLine("POST /") + @RequestLine("POST /api/protocols/nfs/export-policies") @Headers({"Authorization: {authHeader}"}) void createExportPolicy(@Param("authHeader") String authHeader, ExportPolicy exportPolicy); - @RequestLine("GET /") + @RequestLine("GET /api/protocols/nfs/export-policies") @Headers({"Authorization: {authHeader}"}) - ExportPolicy getExportPolicyResponse(@Param("authHeader") String authHeader); + OntapResponse getExportPolicyResponse(@Param("authHeader") String authHeader, @QueryMap Map queryMap); - @RequestLine("GET /{id}") + @RequestLine("GET /api/protocols/nfs/export-policies/{id}") @Headers({"Authorization: {authHeader}"}) ExportPolicy getExportPolicyById(@Param("authHeader") String authHeader, @Param("id") String id); - @RequestLine("DELETE /{id}") + @RequestLine("DELETE /api/protocols/nfs/export-policies/{id}") @Headers({"Authorization: {authHeader}"}) void deleteExportPolicyById(@Param("authHeader") String authHeader, @Param("id") String id); - @RequestLine("PATCH /{id}") + @RequestLine("PATCH /api/protocols/nfs/export-policies/{id}") @Headers({"Authorization: {authHeader}"}) OntapResponse updateExportPolicy(@Param("authHeader") String authHeader, @Param("id") String id, diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index 838aece8201e..17be8d9508d1 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -188,17 +188,22 @@ public DataStore initialize(Map dsInfos) { // Determine storage pool type and path based on protocol String path; + String host = ""; ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL)); switch (protocol) { case NFS: parameters.setType(Storage.StoragePoolType.NetworkFilesystem); - path = details.get(Constants.MANAGEMENT_LIF) + ":/" + storagePoolName; + // Path should be just the NFS export path (junction path), NOT host:path + // CloudStack will construct the full mount path as: hostAddress + ":" + path + path = "/" + 
storagePoolName; s_logger.info("Setting NFS path for storage pool: " + path); + host = "10.193.192.136"; // TODO hardcoded for now break; case ISCSI: parameters.setType(Storage.StoragePoolType.Iscsi); path = "iqn.1992-08.com.netapp:" + details.get(Constants.SVM_NAME) + "." + storagePoolName; s_logger.info("Setting iSCSI path for storage pool: " + path); + parameters.setHost(details.get(Constants.MANAGEMENT_LIF)); break; default: throw new CloudRuntimeException("Unsupported protocol: " + protocol + ", cannot create primary storage"); @@ -239,8 +244,8 @@ public DataStore initialize(Map dsInfos) { } // Set parameters for primary data store - parameters.setHost(details.get(Constants.MANAGEMENT_LIF)); parameters.setPort(Constants.ONTAP_PORT); + parameters.setHost(host); parameters.setPath(path); parameters.setTags(tags != null ? tags : ""); parameters.setIsTagARule(isTagARule != null ? isTagARule : Boolean.FALSE); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java new file mode 100644 index 000000000000..cf9cd5510ce0 --- /dev/null +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java @@ -0,0 +1,168 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.storage.listener; + +import javax.inject.Inject; + +import com.cloud.agent.api.ModifyStoragePoolCommand; +import com.cloud.alert.AlertManager; +import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.dao.StoragePoolHostDao; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.DeleteStoragePoolCommand; +import com.cloud.host.Host; +import com.cloud.storage.StoragePool; +import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import com.cloud.host.dao.HostDao; + +/** + * HypervisorHostListener implementation for ONTAP storage. + * Handles connecting/disconnecting hosts to/from ONTAP-backed storage pools. 
+ */ +public class OntapHostListener implements HypervisorHostListener { + protected Logger logger = LogManager.getLogger(getClass()); + + @Inject + private AgentManager _agentMgr; + @Inject + private AlertManager _alertMgr; + @Inject + private PrimaryDataStoreDao _storagePoolDao; + @Inject + private HostDao _hostDao; + @Inject private StoragePoolHostDao storagePoolHostDao; + + + @Override + public boolean hostConnect(long hostId, long poolId) { + logger.info("Connect to host " + hostId + " from pool " + poolId); + Host host = _hostDao.findById(hostId); + if (host == null) { + logger.error("Failed to add host by HostListener as host was not found with id : {}", hostId); + return false; + } + + // TODO add host type check also since we support only KVM for now, host.getHypervisorType().equals(HypervisorType.KVM) + StoragePool pool = _storagePoolDao.findById(poolId); + logger.info("Connecting host {} to ONTAP storage pool {}", host.getName(), pool.getName()); + + + // incase host was not added by cloudstack , we will add it + StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(poolId, hostId); + + if (storagePoolHost == null) { + storagePoolHost = new StoragePoolHostVO(poolId, hostId, ""); + + storagePoolHostDao.persist(storagePoolHost); + } + + // Validate pool type - ONTAP supports NFS and iSCSI +// StoragePoolType poolType = pool.getPoolType(); +// // TODO add iscsi also here +// if (poolType != StoragePoolType.NetworkFilesystem) { +// logger.error("Unsupported pool type {} for ONTAP storage", poolType); +// return false; +// } + + try { + // Create the CreateStoragePoolCommand to send to the agent + ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool); + + Answer answer = _agentMgr.easySend(hostId, cmd); + + if (answer == null) { + throw new CloudRuntimeException(String.format("Unable to get an answer to the modify storage pool command (%s)", pool)); + } + + if (!answer.getResult()) { + String msg = String.format("Unable to attach storage pool %s to host %d", pool, hostId); + + _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, pool.getDataCenterId(), pool.getPodId(), msg, msg); + + throw new CloudRuntimeException(String.format( + "Unable to establish a connection from agent to storage pool %s due to %s", pool, answer.getDetails())); + } + } catch (Exception e) { + logger.error("Exception while connecting host {} to storage pool {}", host.getName(), pool.getName(), e); + throw new CloudRuntimeException("Failed to connect host to storage pool: " + e.getMessage(), e); + } + return true; + } + + @Override + public boolean hostDisconnected(Host host, StoragePool pool) { + logger.info("Disconnect from host " + host.getId() + " from pool " + pool.getName()); + + Host hostToremove = _hostDao.findById(host.getId()); + if (hostToremove == null) { + logger.error("Failed to add host by HostListener as host was not found with id : {}", host.getId()); + return false; + } + // TODO add storage pool get validation + logger.info("Disconnecting host {} from ONTAP storage pool {}", host.getName(), pool.getName()); + + try { + DeleteStoragePoolCommand cmd = new DeleteStoragePoolCommand(pool); + long hostId = host.getId(); + Answer answer = _agentMgr.easySend(hostId, cmd); + + if (answer != null && answer.getResult()) { + logger.info("Successfully disconnected host {} from ONTAP storage pool {}", host.getName(), pool.getName()); + return true; + } else { + String errMsg = (answer != null) ? 
answer.getDetails() : "Unknown error"; + logger.warn("Failed to disconnect host {} from storage pool {}. Error: {}", host.getName(), pool.getName(), errMsg); + return false; + } + } catch (Exception e) { + logger.error("Exception while disconnecting host {} from storage pool {}", host.getName(), pool.getName(), e); + return false; + } + } + + @Override + public boolean hostDisconnected(long hostId, long poolId) { + return false; + } + + @Override + public boolean hostAboutToBeRemoved(long hostId) { + return false; + } + + @Override + public boolean hostRemoved(long hostId, long clusterId) { + return false; + } + + @Override + public boolean hostEnabled(long hostId) { + return false; + } + + @Override + public boolean hostAdded(long hostId) { + return false; + } + +} diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java deleted file mode 100755 index 3fb6406aa9cc..000000000000 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.cloudstack.storage.provider; -// -//import com.cloud.exception.StorageConflictException; -//import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; -// -//public class OntapHostListener implements HypervisorHostListener { -// -// public OntapHostListener(){} -// -// @Override -// public boolean hostAdded(long hostId) { -// return false; -// } -// -// @Override -// public boolean hostConnect(long hostId, long poolId) throws StorageConflictException { -// return false; -// } -// -// @Override -// public boolean hostDisconnected(long hostId, long poolId) { -// return false; -// } -// -// @Override -// public boolean hostAboutToBeRemoved(long hostId) { -// return false; -// } -// -// @Override -// public boolean hostRemoved(long hostId, long clusterId) { -// return false; -// } -// -// @Override -// public boolean hostEnabled(long hostId) { -// return false; -// } -//} \ No newline at end of file diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java index d954d1d413cf..4079792f87d8 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java @@ -27,6 +27,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; import org.apache.cloudstack.storage.driver.OntapPrimaryDatastoreDriver; import org.apache.cloudstack.storage.lifecycle.OntapPrimaryDatastoreLifecycle; +import org.apache.cloudstack.storage.listener.OntapHostListener; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.springframework.stereotype.Component; @@ -41,7 +42,7 @@ public class OntapPrimaryDatastoreProvider implements PrimaryDataStoreProvider { private static final Logger s_logger = LogManager.getLogger(OntapPrimaryDatastoreProvider.class); private OntapPrimaryDatastoreDriver primaryDatastoreDriver; private OntapPrimaryDatastoreLifecycle primaryDatastoreLifecycle; - // private HypervisorHostListener listener; + private HypervisorHostListener listener; public OntapPrimaryDatastoreProvider() { s_logger.info("OntapPrimaryDatastoreProvider initialized"); @@ -58,7 +59,7 @@ public DataStoreDriver getDataStoreDriver() { @Override public HypervisorHostListener getHostListener() { - return null; + return listener; } @Override @@ -72,7 +73,7 @@ public boolean configure(Map params) { s_logger.trace("OntapPrimaryDatastoreProvider: configure: Called"); primaryDatastoreDriver = ComponentContext.inject(OntapPrimaryDatastoreDriver.class); primaryDatastoreLifecycle = ComponentContext.inject(OntapPrimaryDatastoreLifecycle.class); - // listener = ComponentContext.inject(OntapHostListener.class); + listener = ComponentContext.inject(OntapHostListener.class); return true; } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java index 045cf23af67f..b11c60e63385 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java @@ 
-27,6 +27,7 @@ import org.apache.cloudstack.storage.feign.client.VolumeFeignClient; import org.apache.cloudstack.storage.feign.model.Aggregate; import org.apache.cloudstack.storage.feign.model.Job; +import org.apache.cloudstack.storage.feign.model.Nas; import org.apache.cloudstack.storage.feign.model.OntapStorage; import org.apache.cloudstack.storage.feign.model.Svm; import org.apache.cloudstack.storage.feign.model.Volume; @@ -150,10 +151,15 @@ public Volume createStorageVolume(String volumeName, Long size) { Svm svm = new Svm(); svm.setName(svmName); + Nas nas = new Nas(); + nas.setPath("/" + volumeName); + volumeRequest.setName(volumeName); volumeRequest.setSvm(svm); volumeRequest.setAggregates(aggregates); volumeRequest.setSize(size); + volumeRequest.setNas(nas); // be default if we don't set path , ONTAP create a volume with mount/junction path // TODO check if we need to append svm name or not + // since storage pool also cannot be duplicate so junction path can also be not duplicate so /volumeName will always be unique // Make the POST API call to create the volume try { /* diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java index 1212d75903c7..4672edd22033 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java @@ -64,9 +64,10 @@ public UnifiedNASStrategy(OntapStorage ontapStorage) { String baseURL = Constants.HTTPS + ontapStorage.getManagementLIF(); // Initialize FeignClientFactory and create NAS client this.feignClientFactory = new FeignClientFactory(); + // NAS client uses export policy API endpoint this.nasFeignClient = feignClientFactory.createClient(NASFeignClient.class, baseURL); - this.volumeFeignClient = feignClientFactory.createClient(VolumeFeignClient.class, baseURL); - this.jobFeignClient = feignClientFactory.createClient(JobFeignClient.class, baseURL); + this.volumeFeignClient = feignClientFactory.createClient(VolumeFeignClient.class,baseURL ); + this.jobFeignClient = feignClientFactory.createClient(JobFeignClient.class, baseURL ); } public void setOntapStorage(OntapStorage ontapStorage) { @@ -108,51 +109,23 @@ CloudStackVolume getCloudStackVolume(CloudStackVolume cloudstackVolume) { @Override public AccessGroup createAccessGroup(AccessGroup accessGroup) { - // Create the export policy Map details = accessGroup.getPrimaryDataStoreInfo().getDetails(); String svmName = details.get(Constants.SVM_NAME); String volumeUUID = details.get(Constants.VOLUME_UUID); String volumeName = details.get(Constants.VOLUME_NAME); - String exportPolicyName = "export-" + svmName + "-" + volumeName;// TODO move this to util - - ExportPolicy exportPolicy = new ExportPolicy(); - - List rules = new ArrayList<>(); - ExportRule exportRule = new ExportRule(); - - List exportClients = new ArrayList<>(); - List hosts = accessGroup.getHostsToConnect(); - for (HostVO host : hosts) { - String hostStorageIp = host.getStorageIpAddress(); - String ip = (hostStorageIp != null && !hostStorageIp.isEmpty()) - ? 
hostStorageIp - : host.getPrivateIpAddress(); - String ipToUse = ip + "/32"; - ExportRule.ExportClient exportClient = new ExportRule.ExportClient(); - exportClient.setMatch(ipToUse); - exportClients.add(exportClient); - } - exportRule.setClients(exportClients); - exportRule.setProtocols(List.of(ExportRule.ProtocolsEnum.any)); - exportRule.setRoRule(List.of("any")); - exportRule.setRwRule(List.of("any")); - rules.add(exportRule); - Svm svm = new Svm(); - svm.setName(svmName); - exportPolicy.setSvm(svm); - exportPolicy.setRules(rules); - exportPolicy.setName(exportPolicyName); + // Create the export policy + ExportPolicy policyRequest = createExportPolicyRequest(accessGroup,svmName,volumeName); try { - createExportPolicy(svmName, exportPolicy); - s_logger.info("ExportPolicy created: {}, now attaching this policy to storage pool volume", exportPolicy.getName()); + createExportPolicy(svmName, policyRequest); + s_logger.info("ExportPolicy created: {}, now attaching this policy to storage pool volume", policyRequest.getName()); // attach export policy to volume of storage pool - assignExportPolicyToVolume(volumeUUID,exportPolicy.getName()); - s_logger.info("Successfully assigned exportPolicy {} to volume {}", exportPolicy.getName(), volumeName); - accessGroup.setPolicy(exportPolicy); + assignExportPolicyToVolume(volumeUUID,policyRequest.getName()); + s_logger.info("Successfully assigned exportPolicy {} to volume {}", policyRequest.getName(), volumeName); + accessGroup.setPolicy(policyRequest); return accessGroup; - }catch (Exception e){ + }catch(Exception e){ s_logger.error("Exception occurred while creating access group: " + e); throw new CloudRuntimeException("Failed to create access group: " + e); } @@ -192,6 +165,18 @@ private void createExportPolicy(String svmName, ExportPolicy policy) { try { String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); nasFeignClient.createExportPolicy(authHeader, policy); + try { + Map queryParams = Map.of(Constants.NAME, policy.getName()); + OntapResponse policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader, queryParams); + if (policiesResponse == null || policiesResponse.getRecords().isEmpty()) { + throw new CloudRuntimeException("Export policy " + policy.getName() + " was not created on ONTAP. 
" + + "Received successful response but policy does not exist."); + } + s_logger.info("Export policy created and verified successfully: " + policy.getName()); + } catch (FeignException e) { + s_logger.error("Failed to verify export policy creation: " + policy.getName(), e); + throw new CloudRuntimeException("Export policy creation verification failed: " + e.getMessage()); + } s_logger.info("Export policy created successfully with name {}", policy.getName()); } catch (FeignException e) { s_logger.error("Failed to create export policy: {}", policy, e); @@ -205,13 +190,14 @@ private void createExportPolicy(String svmName, ExportPolicy policy) { private void deleteExportPolicy(String svmName, String policyName) { try { String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); - ExportPolicy policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader); + Map queryParams = Map.of(Constants.NAME, policyName); + OntapResponse policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader, queryParams); if (policiesResponse == null ) { s_logger.warn("Export policy not found for deletion: {}", policyName); throw new CloudRuntimeException("Export policy not found : " + policyName); } - String policyId = policiesResponse.getId().toString(); + String policyId = String.valueOf(policiesResponse.getRecords().get(0).getId()); nasFeignClient.deleteExportPolicyById(authHeader, policyId); s_logger.info("Export policy deleted successfully: {}", policyName); } catch (Exception e) { @@ -230,15 +216,18 @@ private void assignExportPolicyToVolume(String volumeUuid, String policyName) { try { String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); - ExportPolicy policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader); - if (policiesResponse == null) { + Map queryParams = Map.of(Constants.NAME, policyName); + OntapResponse policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader, queryParams); + if (policiesResponse == null || policiesResponse.getRecords().isEmpty()) { + s_logger.error("Export policy not found for assigning rule: {}", policyName); throw new CloudRuntimeException("Export policy not found: " + policyName); } + // Create Volume update object with NAS configuration Volume volumeUpdate = new Volume(); Nas nas = new Nas(); ExportPolicy policy = new ExportPolicy(); - policy.setName(policiesResponse.getName()); + policy.setName(policyName); nas.setExportPolicy(policy); volumeUpdate.setNas(nas); @@ -250,7 +239,7 @@ private void assignExportPolicyToVolume(String volumeUuid, String policyName) { */ JobResponse jobResponse = volumeFeignClient.updateVolumeRebalancing(authHeader, volumeUuid, volumeUpdate); if (jobResponse == null || jobResponse.getJob() == null) { - throw new CloudRuntimeException("Failed to attach policy " + policiesResponse.getName() + "to volume " + volumeUuid); + throw new CloudRuntimeException("Failed to attach policy " + policyName + "to volume " + volumeUuid); } String jobUUID = jobResponse.getJob().getUuid(); @@ -334,14 +323,14 @@ private OntapResponse getFileInfo(String volumeUuid, String filePath) OntapResponse response = nasFeignClient.getFileResponse(authHeader, volumeUuid, filePath); s_logger.debug("Retrieved file info for: {} in volume: {}", filePath, volumeUuid); return response; - } catch (FeignException e) { + } catch (FeignException e){ if (e.status() == 404) { s_logger.debug("File not found: {} in volume: {}", filePath, volumeUuid); return null; } 
s_logger.error("Failed to get file info: {} in volume: {}", filePath, volumeUuid, e); throw new CloudRuntimeException("Failed to get file info: " + e.getMessage()); - } catch (Exception e) { + } catch (Exception e){ s_logger.error("Exception while getting file info: {} in volume: {}", filePath, volumeUuid, e); throw new CloudRuntimeException("Failed to get file info: " + e.getMessage()); } @@ -358,9 +347,48 @@ private boolean updateFile(String volumeUuid, String filePath, FileInfo fileInfo } catch (FeignException e) { s_logger.error("Failed to update file: {} in volume: {}", filePath, volumeUuid, e); return false; - } catch (Exception e) { + } catch (Exception e){ s_logger.error("Exception while updating file: {} in volume: {}", filePath, volumeUuid, e); return false; } } + + private String generateExportPolicyName(String svmName, String volumeName){ + return Constants.EXPORT + Constants.HYPHEN + svmName + Constants.HYPHEN + volumeName; + } + + private ExportPolicy createExportPolicyRequest(AccessGroup accessGroup,String svmName , String volumeName){ + + String exportPolicyName = generateExportPolicyName(svmName,volumeName); + ExportPolicy exportPolicy = new ExportPolicy(); + + List rules = new ArrayList<>(); + ExportRule exportRule = new ExportRule(); + + List exportClients = new ArrayList<>(); + List hosts = accessGroup.getHostsToConnect(); + for (HostVO host : hosts) { + String hostStorageIp = host.getStorageIpAddress(); + String ip = (hostStorageIp != null && !hostStorageIp.isEmpty()) + ? hostStorageIp + : host.getPrivateIpAddress(); + String ipToUse = ip + "/32"; + ExportRule.ExportClient exportClient = new ExportRule.ExportClient(); + exportClient.setMatch(ipToUse); + exportClients.add(exportClient); + } + exportRule.setClients(exportClients); + exportRule.setProtocols(List.of(ExportRule.ProtocolsEnum.any)); + exportRule.setRoRule(List.of("any")); + exportRule.setRwRule(List.of("any")); + rules.add(exportRule); + + Svm svm = new Svm(); + svm.setName(svmName); + exportPolicy.setSvm(svm); + exportPolicy.setRules(rules); + exportPolicy.setName(exportPolicyName); + + return exportPolicy; + } } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java index a81fdb0a8ab5..a885179da05b 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java @@ -33,6 +33,7 @@ public class Constants { public static final String RUNNING = "running"; public static final String VOLUME_UUID = "volumeUUID"; public static final String VOLUME_NAME = "volumeNAME"; + public static final String EXPORT = "export"; public static final int ONTAP_PORT = 443; @@ -55,6 +56,7 @@ public class Constants { public static final String EQUALS = "="; public static final String SEMICOLON = ";"; public static final String COMMA = ","; + public static final String HYPHEN = "-"; public static final String VOLUME_PATH_PREFIX = "/vol/"; From 3fdea75cbd2595d7d825cd559e640c7831c280dc Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Fri, 14 Nov 2025 13:47:43 +0530 Subject: [PATCH 06/29] storage pool mounting on host 1 --- .../driver/OntapPrimaryDatastoreDriver.java | 1 + .../storage/listener/OntapHostListener.java | 55 +++++++++++++------ .../storage/service/UnifiedNASStrategy.java | 2 +- 3 files changed, 39 insertions(+), 19 deletions(-) 
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index 979e761eb4b0..17a23aeec47f 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -116,6 +116,7 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet createCmdResult = new CreateCmdResult(null, new Answer(null, false, errMsg)); createCmdResult.setResult(e.toString()); } finally { + s_logger.info("Volume creation successfully completed"); callback.complete(createCmdResult); } } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java index cf9cd5510ce0..1b24d22fd285 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java @@ -20,6 +20,8 @@ import javax.inject.Inject; import com.cloud.agent.api.ModifyStoragePoolCommand; +import com.cloud.agent.api.ModifyStoragePoolAnswer; +import com.cloud.agent.api.StoragePoolInfo; import com.cloud.alert.AlertManager; import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.dao.StoragePoolHostDao; @@ -32,6 +34,7 @@ import com.cloud.storage.StoragePool; import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; import com.cloud.host.dao.HostDao; @@ -67,25 +70,8 @@ public boolean hostConnect(long hostId, long poolId) { logger.info("Connecting host {} to ONTAP storage pool {}", host.getName(), pool.getName()); - // incase host was not added by cloudstack , we will add it - StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(poolId, hostId); - - if (storagePoolHost == null) { - storagePoolHost = new StoragePoolHostVO(poolId, hostId, ""); - - storagePoolHostDao.persist(storagePoolHost); - } - - // Validate pool type - ONTAP supports NFS and iSCSI -// StoragePoolType poolType = pool.getPoolType(); -// // TODO add iscsi also here -// if (poolType != StoragePoolType.NetworkFilesystem) { -// logger.error("Unsupported pool type {} for ONTAP storage", poolType); -// return false; -// } - try { - // Create the CreateStoragePoolCommand to send to the agent + // Create the ModifyStoragePoolCommand to send to the agent ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool); Answer answer = _agentMgr.easySend(hostId, cmd); @@ -102,6 +88,39 @@ public boolean hostConnect(long hostId, long poolId) { throw new CloudRuntimeException(String.format( "Unable to establish a connection from agent to storage pool %s due to %s", pool, answer.getDetails())); } + + // Get the mount path from the answer + ModifyStoragePoolAnswer mspAnswer = (ModifyStoragePoolAnswer) answer; + StoragePoolInfo poolInfo = mspAnswer.getPoolInfo(); + if (poolInfo == null) { + throw new CloudRuntimeException("ModifyStoragePoolAnswer returned null poolInfo"); 
+ } + + String localPath = poolInfo.getLocalPath(); + logger.info("Storage pool {} successfully mounted at: {}", pool.getName(), localPath); + + // Update or create the storage_pool_host_ref entry with the correct local_path + StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(poolId, hostId); + + if (storagePoolHost == null) { + storagePoolHost = new StoragePoolHostVO(poolId, hostId, localPath); + storagePoolHostDao.persist(storagePoolHost); + logger.info("Created storage_pool_host_ref entry for pool {} and host {}", pool.getName(), host.getName()); + } else { + storagePoolHost.setLocalPath(localPath); + storagePoolHostDao.update(storagePoolHost.getId(), storagePoolHost); + logger.info("Updated storage_pool_host_ref entry with local_path: {}", localPath); + } + + // Update pool capacity/usage information + StoragePoolVO poolVO = _storagePoolDao.findById(poolId); + if (poolVO != null && poolInfo.getCapacityBytes() > 0) { + poolVO.setCapacityBytes(poolInfo.getCapacityBytes()); + poolVO.setUsedBytes(poolInfo.getCapacityBytes() - poolInfo.getAvailableBytes()); + _storagePoolDao.update(poolVO.getId(), poolVO); + logger.info("Updated storage pool capacity: {} GB, used: {} GB", poolInfo.getCapacityBytes() / (1024 * 1024 * 1024), (poolInfo.getCapacityBytes() - poolInfo.getAvailableBytes()) / (1024 * 1024 * 1024)); + } + } catch (Exception e) { logger.error("Exception while connecting host {} to storage pool {}", host.getName(), pool.getName(), e); throw new CloudRuntimeException("Failed to connect host to storage pool: " + e.getMessage(), e); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java index 4672edd22033..883bd554b767 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java @@ -372,7 +372,7 @@ private ExportPolicy createExportPolicyRequest(AccessGroup accessGroup,String sv String ip = (hostStorageIp != null && !hostStorageIp.isEmpty()) ? 
hostStorageIp : host.getPrivateIpAddress(); - String ipToUse = ip + "/32"; + String ipToUse = ip + "/31"; // TODO since we have 2 IPs internal and external ExportRule.ExportClient exportClient = new ExportRule.ExportClient(); exportClient.setMatch(ipToUse); exportClients.add(exportClient); From 18602845a903f2dbedd7b01378b45e65bcdc0093 Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Mon, 17 Nov 2025 17:00:56 +0530 Subject: [PATCH 07/29] vm restart issue --- .../storage/feign/model/ExportRule.java | 11 +++++++++++ .../OntapPrimaryDatastoreLifecycle.java | 2 ++ .../storage/listener/OntapHostListener.java | 18 ++++++++++++++++-- .../storage/service/UnifiedNASStrategy.java | 1 + 4 files changed, 30 insertions(+), 2 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportRule.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportRule.java index 769f94836b31..788fc8b5544d 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportRule.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportRule.java @@ -82,6 +82,9 @@ public static ProtocolsEnum fromValue(String text) { @JsonProperty("rw_rule") private List rwRule = null; + @JsonProperty("superuser") + private List superuser = null; + public ExportRule anonymousUser(String anonymousUser) { this.anonymousUser = anonymousUser; @@ -163,6 +166,14 @@ public void setRoRule(List roRule) { this.roRule = roRule; } + public List getSuperuser() { + return superuser; + } + + public void setSuperuser(List superuser) { + this.superuser = superuser; + } + @Override public String toString() { StringBuilder sb = new StringBuilder(); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index 17be8d9508d1..64a39056f31f 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -291,6 +291,7 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); + throw new CloudRuntimeException("Failed to attach storage pool to cluster: " + e.getMessage(), e); } } _dataStoreHelper.attachCluster(dataStore); @@ -327,6 +328,7 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); + throw new CloudRuntimeException("Failed to attach storage pool to host: " + e.getMessage(), e); } } _dataStoreHelper.attachZone(dataStore); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java index 1b24d22fd285..9db7774bc21b 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java +++ 
b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java @@ -67,9 +67,21 @@ public boolean hostConnect(long hostId, long poolId) { // TODO add host type check also since we support only KVM for now, host.getHypervisorType().equals(HypervisorType.KVM) StoragePool pool = _storagePoolDao.findById(poolId); + if (pool == null) { + logger.error("Failed to connect host - storage pool not found with id: {}", poolId); + return false; + } + + // CRITICAL: Check if already connected to avoid infinite loops + StoragePoolHostVO existingConnection = storagePoolHostDao.findByPoolHost(poolId, hostId); + if (existingConnection != null && existingConnection.getLocalPath() != null && !existingConnection.getLocalPath().isEmpty()) { + logger.info("Host {} is already connected to storage pool {} at path {}. Skipping reconnection.", + host.getName(), pool.getName(), existingConnection.getLocalPath()); + return true; + } + logger.info("Connecting host {} to ONTAP storage pool {}", host.getName(), pool.getName()); - try { // Create the ModifyStoragePoolCommand to send to the agent ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool); @@ -123,7 +135,9 @@ public boolean hostConnect(long hostId, long poolId) { } catch (Exception e) { logger.error("Exception while connecting host {} to storage pool {}", host.getName(), pool.getName(), e); - throw new CloudRuntimeException("Failed to connect host to storage pool: " + e.getMessage(), e); + // CRITICAL: Don't throw exception - it crashes the agent and causes restart loops + // Return false to indicate failure without crashing + return false; } return true; } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java index 883bd554b767..6044ca86230e 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java @@ -381,6 +381,7 @@ private ExportPolicy createExportPolicyRequest(AccessGroup accessGroup,String sv exportRule.setProtocols(List.of(ExportRule.ProtocolsEnum.any)); exportRule.setRoRule(List.of("any")); exportRule.setRwRule(List.of("any")); + exportRule.setSuperuser(List.of("any")); // Allow root/superuser access for NFS writes rules.add(exportRule); Svm svm = new Svm(); From 71b5ddf0d9004328f4616fdc908483a733a48f01 Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Mon, 17 Nov 2025 17:02:55 +0530 Subject: [PATCH 08/29] vm restart issue 1 --- .../cloudstack/storage/listener/OntapHostListener.java | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java index 9db7774bc21b..2fe8bf28fbe6 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java @@ -71,17 +71,13 @@ public boolean hostConnect(long hostId, long poolId) { logger.error("Failed to connect host - storage pool not found with id: {}", poolId); return false; } - // CRITICAL: Check if already connected to avoid infinite 
loops StoragePoolHostVO existingConnection = storagePoolHostDao.findByPoolHost(poolId, hostId); if (existingConnection != null && existingConnection.getLocalPath() != null && !existingConnection.getLocalPath().isEmpty()) { - logger.info("Host {} is already connected to storage pool {} at path {}. Skipping reconnection.", - host.getName(), pool.getName(), existingConnection.getLocalPath()); + logger.info("Host {} is already connected to storage pool {} at path {}. Skipping reconnection.", host.getName(), pool.getName(), existingConnection.getLocalPath()); return true; } - logger.info("Connecting host {} to ONTAP storage pool {}", host.getName(), pool.getName()); - try { // Create the ModifyStoragePoolCommand to send to the agent ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool); From 1c3321130ac8c2bec12aa2dd4da30c790bfd4973 Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Mon, 17 Nov 2025 19:13:10 +0530 Subject: [PATCH 09/29] vm restart issue 2 --- .../storage/lifecycle/OntapPrimaryDatastoreLifecycle.java | 4 ++-- .../cloudstack/storage/listener/OntapHostListener.java | 1 + .../cloudstack/storage/service/UnifiedNASStrategy.java | 8 ++++---- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index 64a39056f31f..676192d7ed96 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -291,7 +291,7 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); - throw new CloudRuntimeException("Failed to attach storage pool to cluster: " + e.getMessage(), e); + return false; } } _dataStoreHelper.attachCluster(dataStore); @@ -328,7 +328,7 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); - throw new CloudRuntimeException("Failed to attach storage pool to host: " + e.getMessage(), e); + return false; } } _dataStoreHelper.attachZone(dataStore); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java index 2fe8bf28fbe6..3b031cbb6a24 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java @@ -77,6 +77,7 @@ public boolean hostConnect(long hostId, long poolId) { logger.info("Host {} is already connected to storage pool {} at path {}. 
Skipping reconnection.", host.getName(), pool.getName(), existingConnection.getLocalPath()); return true; } + logger.info("Connecting host {} to ONTAP storage pool {}", host.getName(), pool.getName()); try { // Create the ModifyStoragePoolCommand to send to the agent diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java index 6044ca86230e..8410fd23534a 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java @@ -372,16 +372,16 @@ private ExportPolicy createExportPolicyRequest(AccessGroup accessGroup,String sv String ip = (hostStorageIp != null && !hostStorageIp.isEmpty()) ? hostStorageIp : host.getPrivateIpAddress(); - String ipToUse = ip + "/31"; // TODO since we have 2 IPs internal and external + String ipToUse = ip + "/32"; ExportRule.ExportClient exportClient = new ExportRule.ExportClient(); exportClient.setMatch(ipToUse); exportClients.add(exportClient); } exportRule.setClients(exportClients); exportRule.setProtocols(List.of(ExportRule.ProtocolsEnum.any)); - exportRule.setRoRule(List.of("any")); - exportRule.setRwRule(List.of("any")); - exportRule.setSuperuser(List.of("any")); // Allow root/superuser access for NFS writes + exportRule.setRoRule(List.of("sys")); // Use sys (Unix UID/GID) authentication for NFS + exportRule.setRwRule(List.of("sys")); // Use sys (Unix UID/GID) authentication for NFS + exportRule.setSuperuser(List.of("sys")); // Allow root/superuser access with sys auth rules.add(exportRule); Svm svm = new Svm(); From 899d68eff1189dd9c3bd56a3ab21778ed2ce4702 Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Thu, 20 Nov 2025 21:28:17 +0530 Subject: [PATCH 10/29] vm instance creation test1 --- .../driver/OntapPrimaryDatastoreDriver.java | 174 ++++++++++++++++-- .../OntapPrimaryDatastoreLifecycle.java | 2 +- .../storage/service/UnifiedNASStrategy.java | 25 ++- .../cloudstack/storage/utils/Utility.java | 28 ++- 4 files changed, 197 insertions(+), 32 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index 17a23aeec47f..00a4ded8c866 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -27,6 +27,7 @@ import com.cloud.storage.Storage; import com.cloud.storage.StoragePool; import com.cloud.storage.Volume; +import com.cloud.storage.VolumeVO; import com.cloud.utils.Pair; import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; @@ -64,13 +65,14 @@ public class OntapPrimaryDatastoreDriver implements PrimaryDataStoreDriver { @Inject private StoragePoolDetailsDao storagePoolDetailsDao; @Inject private PrimaryDataStoreDao storagePoolDao; + @Inject private com.cloud.storage.dao.VolumeDao volumeDao; @Override public Map getCapabilities() { s_logger.trace("OntapPrimaryDatastoreDriver: getCapabilities: Called"); Map mapCapabilities = new HashMap<>(); - - 
mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.TRUE.toString()); - mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString()); + // RAW managed initial implementation: snapshot features not yet supported + mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.FALSE.toString()); + mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.FALSE.toString()); return mapCapabilities; } @@ -116,36 +118,176 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet createCmdResult = new CreateCmdResult(null, new Answer(null, false, errMsg)); createCmdResult.setResult(e.toString()); } finally { - s_logger.info("Volume creation successfully completed"); + if (createCmdResult != null && createCmdResult.isSuccess()) { + s_logger.info("createAsync: Volume metadata created successfully. Path: {}", path); + } callback.complete(createCmdResult); } } + /** + * Creates CloudStack volume based on storage protocol type (NFS or iSCSI). + * + * For Managed NFS (Option 2 Implementation): + * - Returns only UUID without creating qcow2 file + * - KVM hypervisor creates qcow2 file automatically during VM deployment + * - ONTAP volume provides the backing NFS storage + * + * For iSCSI/Block Storage: + * - Creates LUN via ONTAP REST API + * - Returns LUN path for direct attachment + */ private String createCloudStackVolumeForTypeVolume(DataStore dataStore, DataObject dataObject) { StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); if(storagePool == null) { - s_logger.error("createCloudStackVolume : Storage Pool not found for id: " + dataStore.getId()); - throw new CloudRuntimeException("createCloudStackVolume : Storage Pool not found for id: " + dataStore.getId()); + s_logger.error("createCloudStackVolumeForTypeVolume: Storage Pool not found for id: {}", dataStore.getId()); + throw new CloudRuntimeException("createCloudStackVolumeForTypeVolume: Storage Pool not found for id: " + dataStore.getId()); } + Map details = storagePoolDetailsDao.listDetailsKeyPairs(dataStore.getId()); + String protocol = details.get(Constants.PROTOCOL); + + if (ProtocolType.NFS.name().equalsIgnoreCase(protocol)) { + return createManagedNfsVolume(dataStore, dataObject, storagePool); + } else if (ProtocolType.ISCSI.name().equalsIgnoreCase(protocol)) { + return createManagedBlockVolume(dataStore, dataObject, storagePool, details); + } else { + String errMsg = String.format("createCloudStackVolumeForTypeVolume: Unsupported protocol [%s]", protocol); + s_logger.error(errMsg); + throw new CloudRuntimeException(errMsg); + } + } + + /** + * Creates Managed NFS Volume with ONTAP backing storage. + * + * Architecture: 1 CloudStack Storage Pool = 1 ONTAP Volume (shared by all volumes) + * + * Flow: + * 1. createAsync() stores volume metadata and NFS mount point + * 2. Volume attach triggers ManagedNfsStorageAdaptor.connectPhysicalDisk() + * 3. KVM mounts: nfs://nfsServer/junctionPath to /mnt/volumeUuid + * 4. Libvirt creates qcow2 file via storageVolCreateXML() + * 5. 
File created at: /vol/ontap_volume/volumeUuid (on ONTAP) + * + * Key Details: + * - All volumes in same pool share the same ONTAP volume NFS export + * - Each volume gets separate libvirt mount point: /mnt/ + * - All qcow2 files stored in same ONTAP volume: /vol// + * - volume._iScsiName stores the NFS junction path (pool.path) + * + * @param dataStore CloudStack data store (storage pool) + * @param dataObject Volume data object + * @param storagePool Storage pool VO + * @return Volume UUID (used as filename for qcow2 file) + */ + private String createManagedNfsVolume(DataStore dataStore, DataObject dataObject, StoragePoolVO storagePool) { + VolumeInfo volumeInfo = (VolumeInfo) dataObject; + VolumeVO volume = volumeDao.findById(volumeInfo.getId()); + String volumeUuid = volumeInfo.getUuid(); + + // Get the NFS junction path from storage pool + // This is the path that was set during pool creation (e.g., "/my_pool_volume") + String junctionPath = storagePool.getPath(); + + // Update volume metadata in CloudStack database + volume.setPoolType(Storage.StoragePoolType.ManagedNFS); + volume.setPoolId(dataStore.getId()); + volume.setPath(volumeUuid); // Filename for qcow2 file + + // CRITICAL: Store junction path in _iScsiName field + // CloudStack will use this in AttachCommand as DiskTO.MOUNT_POINT + // ManagedNfsStorageAdaptor will mount: nfs://hostAddress/junctionPath to /mnt/volumeUuid + volume.set_iScsiName(junctionPath); + + volumeDao.update(volume.getId(), volume); + + s_logger.info("ONTAP Managed NFS Volume Created: uuid={}, path={}, junctionPath={}, format=QCOW2, " + + "pool={}, size={}GB. Libvirt will create qcow2 file at mount time.", + volumeUuid, volumeUuid, junctionPath, storagePool.getName(), + volumeInfo.getSize() / (1024 * 1024 * 1024)); + + // Optional: Prepare ONTAP volume for optimal qcow2 storage (future enhancement) + // prepareOntapVolumeForQcow2Storage(dataStore, volumeInfo); + + return volumeUuid; + } + + /** + * Creates iSCSI/Block volume by calling ONTAP REST API to create a LUN. + * + * For block storage (iSCSI), the storage provider must create the LUN + * before CloudStack can use it. This is different from NFS where the + * hypervisor creates the file. 
+ * + * @param dataStore CloudStack data store + * @param dataObject Volume data object + * @param storagePool Storage pool VO + * @param details Storage pool details containing ONTAP connection info + * @return LUN path/name for iSCSI attachment + */ + private String createManagedBlockVolume(DataStore dataStore, DataObject dataObject, + StoragePoolVO storagePool, Map details) { StorageStrategy storageStrategy = getStrategyByStoragePoolDetails(details); - s_logger.info("createCloudStackVolumeForTypeVolume: Connection to Ontap SVM [{}] successful, preparing CloudStackVolumeRequest", details.get(Constants.SVM_NAME)); + + s_logger.info("createManagedBlockVolume: Creating iSCSI LUN on ONTAP SVM [{}]", details.get(Constants.SVM_NAME)); + CloudStackVolume cloudStackVolumeRequest = Utility.createCloudStackVolumeRequestByProtocol(storagePool, details, (VolumeInfo) dataObject); + CloudStackVolume cloudStackVolume = storageStrategy.createCloudStackVolume(cloudStackVolumeRequest); - if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL)) && cloudStackVolume.getLun() != null && cloudStackVolume.getLun().getName() != null) { - return cloudStackVolume.getLun().getName(); - } else if (ProtocolType.NFS.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { - return cloudStackVolume.getFile().getName(); + + if (cloudStackVolume.getLun() != null && cloudStackVolume.getLun().getName() != null) { + String lunPath = cloudStackVolume.getLun().getName(); + s_logger.info("createManagedBlockVolume: iSCSI LUN created successfully: {}", lunPath); + return lunPath; } else { - String errMsg = "createCloudStackVolumeForTypeVolume: Volume creation failed. Lun or Lun Path is null for dataObject: " + dataObject; + String errMsg = String.format("createManagedBlockVolume: LUN creation failed for volume [%s]. " + + "LUN or LUN path is null.", dataObject.getUuid()); s_logger.error(errMsg); throw new CloudRuntimeException(errMsg); } } + /** + * Optional: Prepares ONTAP volume for optimal qcow2 file storage. + * + * Future enhancements can include: + * - Enable compression for qcow2 files + * - Set QoS policies + * - Enable deduplication + * - Configure snapshot policies + * + * This is a placeholder for ONTAP-specific optimizations. + */ + private void prepareOntapVolumeForQcow2Storage(DataStore dataStore, VolumeInfo volumeInfo) { + // TODO: Implement ONTAP volume optimizations + // Examples: + // - storageStrategy.enableCompression(volumePath) + // - storageStrategy.setQosPolicy(volumePath, iops) + // - storageStrategy.enableDeduplication(volumePath) + s_logger.debug("prepareOntapVolumeForQcow2Storage: Placeholder for future ONTAP optimizations"); + } + @Override public void deleteAsync(DataStore store, DataObject data, AsyncCompletionCallback callback) { - + CommandResult commandResult = new CommandResult(); + try { + if (store == null || data == null) { + throw new CloudRuntimeException("deleteAsync: store or data is null"); + } + if (data.getType() == DataObjectType.VOLUME) { + StoragePoolVO storagePool = storagePoolDao.findById(store.getId()); + Map details = storagePoolDetailsDao.listDetailsKeyPairs(store.getId()); + if (ProtocolType.NFS.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { + // ManagedNFS qcow2 backing file deletion handled by KVM host/libvirt; nothing to do via ONTAP REST. 
+ s_logger.info("deleteAsync: ManagedNFS volume {} no-op ONTAP deletion", data.getId()); + } + } + } catch (Exception e) { + commandResult.setResult(e.getMessage()); + } finally { + callback.complete(commandResult); + } } @Override @@ -219,7 +361,7 @@ public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, Qual @Override public boolean canProvideStorageStats() { - return true; + return false; } @Override @@ -229,7 +371,7 @@ public Pair getStorageStats(StoragePool storagePool) { @Override public boolean canProvideVolumeStats() { - return true; + return false; // Not yet implemented for RAW managed NFS } @Override @@ -291,4 +433,4 @@ private StorageStrategy getStrategyByStoragePoolDetails(Map deta throw new CloudRuntimeException("getStrategyByStoragePoolDetails: Connection to Ontap SVM [" + details.get(Constants.SVM_NAME) + "] failed"); } } -} +} \ No newline at end of file diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index 676192d7ed96..ed17075a5594 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -192,7 +192,7 @@ public DataStore initialize(Map dsInfos) { ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL)); switch (protocol) { case NFS: - parameters.setType(Storage.StoragePoolType.NetworkFilesystem); + parameters.setType(Storage.StoragePoolType.ManagedNFS); // Path should be just the NFS export path (junction path), NOT host:path // CloudStack will construct the full mount path as: hostAddress + ":" + path path = "/" + storagePoolName; diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java index 8410fd23534a..de5dd9ffbe34 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java @@ -77,15 +77,22 @@ public void setOntapStorage(OntapStorage ontapStorage) { @Override public CloudStackVolume createCloudStackVolume(CloudStackVolume cloudstackVolume) { s_logger.info("createCloudStackVolume: Create cloudstack volume " + cloudstackVolume); - try { - createFile(cloudstackVolume.getVolume().getUuid(),cloudstackVolume.getCloudstackVolName(), cloudstackVolume.getFile()); - s_logger.debug("Successfully created file in ONTAP under volume with path {} or name {} ", cloudstackVolume.getVolume().getUuid(), cloudstackVolume.getCloudstackVolName()); - FileInfo responseFile = cloudstackVolume.getFile(); - responseFile.setPath(cloudstackVolume.getCloudstackVolName()); - }catch (Exception e) { - s_logger.error("Exception occurred while creating file or dir: {}. 
Exception: {}", cloudstackVolume.getCloudstackVolName(), e.getMessage()); - throw new CloudRuntimeException("Failed to create file: " + e.getMessage()); - } + // Skip ontap file creation for now +// try { +// boolean created = createFile(cloudstackVolume.getVolume().getUuid(),cloudstackVolume.getCloudstackVolName(), cloudstackVolume.getFile()); +// if(created){ +// s_logger.debug("Successfully created file in ONTAP under volume with path {} or name {} ", cloudstackVolume.getVolume().getUuid(), cloudstackVolume.getCloudstackVolName()); +// FileInfo responseFile = cloudstackVolume.getFile(); +// responseFile.setPath(cloudstackVolume.getCloudstackVolName()); +// }else { +// s_logger.error("File not created for volume {}", cloudstackVolume.getVolume().getUuid()); +// throw new CloudRuntimeException("File not created"); +// } +// +// }catch (Exception e) { +// s_logger.error("Exception occurred while creating file or dir: {}. Exception: {}", cloudstackVolume.getCloudstackVolName(), e.getMessage()); +// throw new CloudRuntimeException("Failed to create file: " + e.getMessage()); +// } return cloudstackVolume; } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java index cd02cbf10481..b1f50ee513be 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java @@ -58,6 +58,20 @@ public static String generateAuthHeader (String username, String password) { return BASIC + StringUtils.SPACE + new String(encodedBytes); } + /** + * Creates CloudStackVolume request object for ONTAP REST API calls. + * + * IMPORTANT: For Managed NFS (Option 2 Implementation): + * - The NFS case below is DEPRECATED and NOT USED + * - OntapPrimaryDatastoreDriver.createManagedNfsVolume() handles NFS volumes + * - It returns UUID only without creating files (KVM creates qcow2 automatically) + * - This method is ONLY used for iSCSI/block storage volumes + * + * @param storagePool Storage pool information + * @param details Storage pool details with ONTAP connection info + * @param volumeObject Volume information + * @return CloudStackVolume request for ONTAP REST API + */ public static CloudStackVolume createCloudStackVolumeRequestByProtocol(StoragePoolVO storagePool, Map details, VolumeInfo volumeObject) { CloudStackVolume cloudStackVolumeRequest = null; @@ -65,15 +79,17 @@ public static CloudStackVolume createCloudStackVolumeRequestByProtocol(StoragePo ProtocolType protocolType = ProtocolType.valueOf(protocol); switch (protocolType) { case NFS: - // TODO add logic for NFS file creation + // DEPRECATED: This NFS case is NOT USED in Option 2 Implementation + // For Managed NFS, OntapPrimaryDatastoreDriver.createManagedNfsVolume() + // returns UUID only and lets KVM create qcow2 files automatically. + // This legacy code remains for reference but is bypassed in current implementation. + s_logger.warn("createCloudStackVolumeRequestByProtocol: NFS case should not be called. 
" + + "Use OntapPrimaryDatastoreDriver.createManagedNfsVolume() instead."); cloudStackVolumeRequest = new CloudStackVolume(); FileInfo file = new FileInfo(); - //file.setName("test1"); // to be replaced with volume name // this should not be passed for dir - //file.setName(volumeObject.getName()); // to check whether this needs to be sent or not - file.setSize(Long.parseLong("10000")); file.setSize(volumeObject.getSize()); - file.setUnixPermissions(755); // check if it is needed only for dir ? it is needed for dir - file.setType(FileInfo.TypeEnum.DIRECTORY); // We are creating file for a cloudstack volume . Should it be dir ? // TODO change once multipart is done + file.setUnixPermissions(755); + file.setType(FileInfo.TypeEnum.FILE); Volume poolVolume = new Volume(); poolVolume.setName(details.get(Constants.VOLUME_NAME)); From 20371e681fe20ec2cacdf3d191ff97f23977b2f5 Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Fri, 21 Nov 2025 02:06:30 +0530 Subject: [PATCH 11/29] vm instance creation test2 --- .../driver/OntapPrimaryDatastoreDriver.java | 27 ++++++++++++++++++- .../OntapPrimaryDatastoreLifecycle.java | 4 +++ 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index 00a4ded8c866..b5522a2441fd 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -36,6 +36,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; @@ -45,6 +46,7 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.feign.model.OntapStorage; import org.apache.cloudstack.storage.provider.StorageProviderFactory; import org.apache.cloudstack.storage.service.StorageStrategy; @@ -84,7 +86,30 @@ public DataTO getTO(DataObject data) { @Override public DataStoreTO getStoreTO(DataStore store) { - return null; + // Load storage pool details from database (includes "mountpoint" added during pool creation) + Map poolDetails = storagePoolDetailsDao.listDetailsKeyPairs(store.getId()); + + // Set details on the store before creating PrimaryDataStoreTO + // This ensures PrimaryDataStoreTO constructor gets the details from database + PrimaryDataStore primaryStore = (PrimaryDataStore) store; + if (poolDetails != null && !poolDetails.isEmpty()) { + // Merge existing details (if any) with database details + Map existingDetails = primaryStore.getDetails(); + if (existingDetails == null) { + primaryStore.setDetails(poolDetails); + } else { + // Merge: database details take precedence + Map 
mergedDetails = new HashMap<>(existingDetails); + mergedDetails.putAll(poolDetails); + primaryStore.setDetails(mergedDetails); + } + } + + // Now create PrimaryDataStoreTO - it will get details from primaryStore.getDetails() + PrimaryDataStoreTO storeTO = new PrimaryDataStoreTO(primaryStore); + + s_logger.debug("OntapPrimaryDatastoreDriver: getStoreTO: Created PrimaryDataStoreTO with details from storage_pool_details table"); + return storeTO; } @Override diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index ed17075a5594..e4cd1c253aad 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -243,6 +243,10 @@ public DataStore initialize(Map dsInfos) { throw new CloudRuntimeException("ONTAP details validation failed, cannot create primary storage"); } + // Add mountpoint detail for ManagedNFS - required by KVM agent's ManagedNfsStorageAdaptor + // The 'mountpoint' key is used by connectPhysicalDisk() to mount NFS export + details.put("mountpoint", path); + // Set parameters for primary data store parameters.setPort(Constants.ONTAP_PORT); parameters.setHost(host); From dc95253625e7da7745dd8c0e46af857d02b160d8 Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Fri, 21 Nov 2025 03:28:30 +0530 Subject: [PATCH 12/29] vm instance creation test4 --- .../cloudstack/storage/listener/OntapHostListener.java | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java index 3b031cbb6a24..5b39e2ce5293 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java @@ -71,16 +71,11 @@ public boolean hostConnect(long hostId, long poolId) { logger.error("Failed to connect host - storage pool not found with id: {}", poolId); return false; } - // CRITICAL: Check if already connected to avoid infinite loops - StoragePoolHostVO existingConnection = storagePoolHostDao.findByPoolHost(poolId, hostId); - if (existingConnection != null && existingConnection.getLocalPath() != null && !existingConnection.getLocalPath().isEmpty()) { - logger.info("Host {} is already connected to storage pool {} at path {}. Skipping reconnection.", host.getName(), pool.getName(), existingConnection.getLocalPath()); - return true; - } - logger.info("Connecting host {} to ONTAP storage pool {}", host.getName(), pool.getName()); try { // Create the ModifyStoragePoolCommand to send to the agent + // Note: Always send command even if database entry exists, because agent may have restarted + // and lost in-memory pool registration. The command handler is idempotent. 
ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool); Answer answer = _agentMgr.easySend(hostId, cmd); From ec9f2fdb45aab465550da79dcb0240ed344aed0c Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Fri, 21 Nov 2025 03:53:46 +0530 Subject: [PATCH 13/29] vm instance creation test5 --- .../storage/driver/OntapPrimaryDatastoreDriver.java | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index b5522a2441fd..f30878a42a5d 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -108,7 +108,15 @@ public DataStoreTO getStoreTO(DataStore store) { // Now create PrimaryDataStoreTO - it will get details from primaryStore.getDetails() PrimaryDataStoreTO storeTO = new PrimaryDataStoreTO(primaryStore); - s_logger.debug("OntapPrimaryDatastoreDriver: getStoreTO: Created PrimaryDataStoreTO with details from storage_pool_details table"); + s_logger.info("OntapPrimaryDatastoreDriver: getStoreTO: Created PrimaryDataStoreTO for pool: " + store.getName()); + s_logger.info(" Pool UUID: " + store.getUuid()); + s_logger.info(" Host Address: " + primaryStore.getHostAddress()); + s_logger.info(" Path: " + primaryStore.getPath()); + s_logger.info(" Port: " + primaryStore.getPort()); + s_logger.info(" Details keys: " + (poolDetails != null ? poolDetails.keySet() : "null")); + if (poolDetails != null && poolDetails.containsKey("managedStoreTarget")) { + s_logger.info(" managedStoreTarget: " + poolDetails.get("managedStoreTarget")); + } return storeTO; } From adc30def18805e9d1170bad73d2f2251af270f31 Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Fri, 21 Nov 2025 04:19:14 +0530 Subject: [PATCH 14/29] vm instance creation test6 --- .../storage/driver/OntapPrimaryDatastoreDriver.java | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index f30878a42a5d..285caf408eca 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -113,10 +113,7 @@ public DataStoreTO getStoreTO(DataStore store) { s_logger.info(" Host Address: " + primaryStore.getHostAddress()); s_logger.info(" Path: " + primaryStore.getPath()); s_logger.info(" Port: " + primaryStore.getPort()); - s_logger.info(" Details keys: " + (poolDetails != null ? 
poolDetails.keySet() : "null")); - if (poolDetails != null && poolDetails.containsKey("managedStoreTarget")) { - s_logger.info(" managedStoreTarget: " + poolDetails.get("managedStoreTarget")); - } + s_logger.info(" Final details in storeTO: " + storeTO.getDetails()); return storeTO; } @@ -140,6 +137,12 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet if (dataObject.getType() == DataObjectType.VOLUME) { path = createCloudStackVolumeForTypeVolume(dataStore, dataObject); createCmdResult = new CreateCmdResult(path, new Answer(null, true, null)); + } else if (dataObject.getType() == DataObjectType.TEMPLATE) { + // For templates, return the UUID as the install path + // This will be used as the filename for the qcow2 file on NFS + path = dataObject.getUuid(); + s_logger.info("createAsync: Template [{}] will use UUID as install path: {}", ((TemplateInfo)dataObject).getName(), path); + createCmdResult = new CreateCmdResult(path, new Answer(null, true, null)); } else { errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to createAsync"; s_logger.error(errMsg); From cbc44ba8ecb5b1f93037318eef10bd8231806713 Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Fri, 21 Nov 2025 04:36:17 +0530 Subject: [PATCH 15/29] vm instance creation test7 --- .../storage/driver/OntapPrimaryDatastoreDriver.java | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index 285caf408eca..e71a938405f3 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -231,10 +231,11 @@ private String createManagedNfsVolume(DataStore dataStore, DataObject dataObject volume.setPoolId(dataStore.getId()); volume.setPath(volumeUuid); // Filename for qcow2 file - // CRITICAL: Store junction path in _iScsiName field - // CloudStack will use this in AttachCommand as DiskTO.MOUNT_POINT - // ManagedNfsStorageAdaptor will mount: nfs://hostAddress/junctionPath to /mnt/volumeUuid - volume.set_iScsiName(junctionPath); + // CRITICAL: For ManagedNFS, _iScsiName must be the volume UUID, NOT the junction path + // CloudStack uses _iScsiName as MANAGED_STORE_TARGET -> volPath -> libvirt pool name + // Libvirt pool names cannot contain '/', so we use the UUID + // The junction path comes from storage_pool_details.mountpoint (set in getStoreTO) + volume.set_iScsiName(volumeUuid); volumeDao.update(volume.getId(), volume); From 7db78a13f7066798e9011f4ecd04f6d73ddbf79d Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Fri, 21 Nov 2025 14:50:39 +0530 Subject: [PATCH 16/29] vm instance creation test6 --- .../driver/OntapPrimaryDatastoreDriver.java | 72 ++++++++++++++++++- 1 file changed, 69 insertions(+), 3 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index e71a938405f3..8f1eeaba1543 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ 
b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -22,6 +22,9 @@ import com.cloud.agent.api.to.DataObjectType; import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.DataTO; +import org.apache.cloudstack.storage.command.CreateObjectCommand; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; import com.cloud.exception.InvalidParameterValueException; import com.cloud.host.Host; import com.cloud.storage.Storage; @@ -68,6 +71,8 @@ public class OntapPrimaryDatastoreDriver implements PrimaryDataStoreDriver { @Inject private StoragePoolDetailsDao storagePoolDetailsDao; @Inject private PrimaryDataStoreDao storagePoolDao; @Inject private com.cloud.storage.dao.VolumeDao volumeDao; + @Inject private EndPointSelector epSelector; + @Override public Map getCapabilities() { s_logger.trace("OntapPrimaryDatastoreDriver: getCapabilities: Called"); @@ -161,12 +166,60 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet } } + /** + * Sends CreateObjectCommand to KVM agent to create qcow2 file using qemu-img. + * The KVM agent will call: + * - KVMStorageProcessor.createVolume() + * - primaryPool.createPhysicalDisk() + * - LibvirtStorageAdaptor.createPhysicalDiskByQemuImg() + * Which executes: qemu-img create -f qcow2 /mnt// + * + * @param volumeInfo Volume information with size, format, uuid + * @return Answer from KVM agent indicating success/failure + */ + private Answer createVolumeOnKVMHost(VolumeInfo volumeInfo) { + try { + s_logger.info("createVolumeOnKVMHost: Sending CreateObjectCommand to KVM agent for volume: {}", volumeInfo.getUuid()); + + // Create command with volume TO (Transfer Object) + CreateObjectCommand cmd = new CreateObjectCommand(volumeInfo.getTO()); + + // Select endpoint (KVM agent) to send command + // epSelector will find an appropriate KVM host in the cluster/pod + EndPoint ep = epSelector.select(volumeInfo); + + if (ep == null) { + String errMsg = "No remote endpoint to send CreateObjectCommand, check if host is up"; + s_logger.error(errMsg); + return new Answer(cmd, false, errMsg); + } + + s_logger.info("createVolumeOnKVMHost: Sending command to endpoint: {}", ep.getHostAddr()); + + // Send command to KVM agent and wait for response + Answer answer = ep.sendMessage(cmd); + + if (answer != null && answer.getResult()) { + s_logger.info("createVolumeOnKVMHost: Successfully created qcow2 file on KVM host"); + } else { + s_logger.error("createVolumeOnKVMHost: Failed to create qcow2 file: {}", + answer != null ? answer.getDetails() : "null answer"); + } + + return answer; + + } catch (Exception e) { + s_logger.error("createVolumeOnKVMHost: Exception sending CreateObjectCommand", e); + return new Answer(null, false, e.toString()); + } + } + /** * Creates CloudStack volume based on storage protocol type (NFS or iSCSI). 
* * For Managed NFS (Option 2 Implementation): - * - Returns only UUID without creating qcow2 file - * - KVM hypervisor creates qcow2 file automatically during VM deployment + * - Creates ONTAP volume and sets metadata in CloudStack DB + * - Sends CreateObjectCommand to KVM host to create qcow2 file using qemu-img * - ONTAP volume provides the backing NFS storage * * For iSCSI/Block Storage: @@ -184,7 +237,20 @@ private String createCloudStackVolumeForTypeVolume(DataStore dataStore, DataObje String protocol = details.get(Constants.PROTOCOL); if (ProtocolType.NFS.name().equalsIgnoreCase(protocol)) { - return createManagedNfsVolume(dataStore, dataObject, storagePool); + // Step 1: Create ONTAP volume and set metadata + String volumeUuid = createManagedNfsVolume(dataStore, dataObject, storagePool); + + // Step 2: Send command to KVM host to create qcow2 file using qemu-img + VolumeInfo volumeInfo = (VolumeInfo) dataObject; + Answer answer = createVolumeOnKVMHost(volumeInfo); + + if (answer == null || !answer.getResult()) { + String errMsg = answer != null ? answer.getDetails() : "Failed to create qcow2 on KVM host"; + s_logger.error("createCloudStackVolumeForTypeVolume: " + errMsg); + throw new CloudRuntimeException(errMsg); + } + + return volumeUuid; } else if (ProtocolType.ISCSI.name().equalsIgnoreCase(protocol)) { return createManagedBlockVolume(dataStore, dataObject, storagePool, details); } else { From 006439045c0d8439dcd0027c9c3072954effdb0d Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Fri, 21 Nov 2025 15:00:54 +0530 Subject: [PATCH 17/29] vm instance creation test7 --- .../storage/driver/OntapPrimaryDatastoreDriver.java | 9 ++++----- .../lifecycle/OntapPrimaryDatastoreLifecycle.java | 5 ++++- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index 8f1eeaba1543..32a2bd728da3 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -293,14 +293,13 @@ private String createManagedNfsVolume(DataStore dataStore, DataObject dataObject String junctionPath = storagePool.getPath(); // Update volume metadata in CloudStack database - volume.setPoolType(Storage.StoragePoolType.ManagedNFS); + // Use NetworkFilesystem (not ManagedNFS) - matches pool type set during pool creation + volume.setPoolType(Storage.StoragePoolType.NetworkFilesystem); volume.setPoolId(dataStore.getId()); volume.setPath(volumeUuid); // Filename for qcow2 file - // CRITICAL: For ManagedNFS, _iScsiName must be the volume UUID, NOT the junction path - // CloudStack uses _iScsiName as MANAGED_STORE_TARGET -> volPath -> libvirt pool name - // Libvirt pool names cannot contain '/', so we use the UUID - // The junction path comes from storage_pool_details.mountpoint (set in getStoreTO) + // For NetworkFilesystem with managed=true, _iScsiName should be set to volume UUID + // This maintains compatibility with managed storage behavior volume.set_iScsiName(volumeUuid); volumeDao.update(volume.getId(), volume); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java 
b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index e4cd1c253aad..6105fdbe3a5b 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -192,7 +192,10 @@ public DataStore initialize(Map dsInfos) { ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL)); switch (protocol) { case NFS: - parameters.setType(Storage.StoragePoolType.ManagedNFS); + // Use NetworkFilesystem (not ManagedNFS) with managed=true + // This routes to LibvirtStorageAdaptor which has full qemu-img support + // Same pattern as CloudByte/Elastistor plugin + parameters.setType(Storage.StoragePoolType.NetworkFilesystem); // Path should be just the NFS export path (junction path), NOT host:path // CloudStack will construct the full mount path as: hostAddress + ":" + path path = "/" + storagePoolName; From 982843b8b9749080add8773675747a98353719cf Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Thu, 4 Dec 2025 19:33:40 +0530 Subject: [PATCH 18/29] Update code as per coding principles --- plugins/storage/volume/ontap/pom.xml | 2 +- .../driver/OntapPrimaryDatastoreDriver.java | 255 ++---------------- .../storage/feign/FeignConfiguration.java | 7 - .../OntapPrimaryDatastoreLifecycle.java | 13 - .../storage/listener/OntapHostListener.java | 4 - .../storage/service/StorageStrategy.java | 13 +- .../storage/service/UnifiedNASStrategy.java | 130 +++++---- .../service/model/CloudStackVolume.java | 25 +- .../cloudstack/storage/utils/Utility.java | 33 +-- 9 files changed, 110 insertions(+), 372 deletions(-) diff --git a/plugins/storage/volume/ontap/pom.xml b/plugins/storage/volume/ontap/pom.xml index 3da605f12f9d..5cd012f82f43 100644 --- a/plugins/storage/volume/ontap/pom.xml +++ b/plugins/storage/volume/ontap/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack-plugins - 4.22.0.0-SNAPSHOT + 4.23.0.0-SNAPSHOT ../../../pom.xml diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index 32a2bd728da3..b300fade438c 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -22,15 +22,12 @@ import com.cloud.agent.api.to.DataObjectType; import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.DataTO; -import org.apache.cloudstack.storage.command.CreateObjectCommand; -import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; import com.cloud.exception.InvalidParameterValueException; import com.cloud.host.Host; import com.cloud.storage.Storage; import com.cloud.storage.StoragePool; import com.cloud.storage.Volume; -import com.cloud.storage.VolumeVO; import com.cloud.utils.Pair; import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; @@ -39,7 +36,6 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import 
org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; @@ -49,7 +45,6 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.feign.model.OntapStorage; import org.apache.cloudstack.storage.provider.StorageProviderFactory; import org.apache.cloudstack.storage.service.StorageStrategy; @@ -78,6 +73,7 @@ public Map getCapabilities() { s_logger.trace("OntapPrimaryDatastoreDriver: getCapabilities: Called"); Map mapCapabilities = new HashMap<>(); // RAW managed initial implementation: snapshot features not yet supported + // TODO Set these to true once we start supporting the snapshot feature mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.FALSE.toString()); mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.FALSE.toString()); @@ -90,37 +86,7 @@ public DataTO getTO(DataObject data) { } @Override - public DataStoreTO getStoreTO(DataStore store) { - // Load storage pool details from database (includes "mountpoint" added during pool creation) - Map poolDetails = storagePoolDetailsDao.listDetailsKeyPairs(store.getId()); - - // Set details on the store before creating PrimaryDataStoreTO - // This ensures PrimaryDataStoreTO constructor gets the details from database - PrimaryDataStore primaryStore = (PrimaryDataStore) store; - if (poolDetails != null && !poolDetails.isEmpty()) { - // Merge existing details (if any) with database details - Map existingDetails = primaryStore.getDetails(); - if (existingDetails == null) { - primaryStore.setDetails(poolDetails); - } else { - // Merge: database details take precedence - Map mergedDetails = new HashMap<>(existingDetails); - mergedDetails.putAll(poolDetails); - primaryStore.setDetails(mergedDetails); - } - } - - // Now create PrimaryDataStoreTO - it will get details from primaryStore.getDetails() - PrimaryDataStoreTO storeTO = new PrimaryDataStoreTO(primaryStore); - - s_logger.info("OntapPrimaryDatastoreDriver: getStoreTO: Created PrimaryDataStoreTO for pool: " + store.getName()); - s_logger.info(" Pool UUID: " + store.getUuid()); - s_logger.info(" Host Address: " + primaryStore.getHostAddress()); - s_logger.info(" Path: " + primaryStore.getPath()); - s_logger.info(" Port: " + primaryStore.getPort()); - s_logger.info(" Final details in storeTO: " + storeTO.getDetails()); - return storeTO; - } + public DataStoreTO getStoreTO(DataStore store) { return null; } @Override public void createAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback callback) { @@ -140,13 +106,7 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet s_logger.info("createAsync: Started for data store [{}] and data object [{}] of type [{}]", dataStore, dataObject, dataObject.getType()); if (dataObject.getType() == DataObjectType.VOLUME) { - path = createCloudStackVolumeForTypeVolume(dataStore, dataObject); - createCmdResult = new CreateCmdResult(path, new Answer(null, true, null)); - } else if 
(dataObject.getType() == DataObjectType.TEMPLATE) { - // For templates, return the UUID as the install path - // This will be used as the filename for the qcow2 file on NFS - path = dataObject.getUuid(); - s_logger.info("createAsync: Template [{}] will use UUID as install path: {}", ((TemplateInfo)dataObject).getName(), path); + path = createCloudStackVolumeForTypeVolume(dataStore, (VolumeInfo)dataObject); createCmdResult = new CreateCmdResult(path, new Answer(null, true, null)); } else { errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to createAsync"; @@ -160,216 +120,34 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet createCmdResult.setResult(e.toString()); } finally { if (createCmdResult != null && createCmdResult.isSuccess()) { - s_logger.info("createAsync: Volume metadata created successfully. Path: {}", path); + s_logger.info("createAsync: Volume created successfully. Path: {}", path); } callback.complete(createCmdResult); } } - /** - * Sends CreateObjectCommand to KVM agent to create qcow2 file using qemu-img. - * The KVM agent will call: - * - KVMStorageProcessor.createVolume() - * - primaryPool.createPhysicalDisk() - * - LibvirtStorageAdaptor.createPhysicalDiskByQemuImg() - * Which executes: qemu-img create -f qcow2 /mnt// - * - * @param volumeInfo Volume information with size, format, uuid - * @return Answer from KVM agent indicating success/failure - */ - private Answer createVolumeOnKVMHost(VolumeInfo volumeInfo) { - try { - s_logger.info("createVolumeOnKVMHost: Sending CreateObjectCommand to KVM agent for volume: {}", volumeInfo.getUuid()); - - // Create command with volume TO (Transfer Object) - CreateObjectCommand cmd = new CreateObjectCommand(volumeInfo.getTO()); - - // Select endpoint (KVM agent) to send command - // epSelector will find an appropriate KVM host in the cluster/pod - EndPoint ep = epSelector.select(volumeInfo); - - if (ep == null) { - String errMsg = "No remote endpoint to send CreateObjectCommand, check if host is up"; - s_logger.error(errMsg); - return new Answer(cmd, false, errMsg); - } - - s_logger.info("createVolumeOnKVMHost: Sending command to endpoint: {}", ep.getHostAddr()); - - // Send command to KVM agent and wait for response - Answer answer = ep.sendMessage(cmd); - - if (answer != null && answer.getResult()) { - s_logger.info("createVolumeOnKVMHost: Successfully created qcow2 file on KVM host"); - } else { - s_logger.error("createVolumeOnKVMHost: Failed to create qcow2 file: {}", - answer != null ? answer.getDetails() : "null answer"); - } - - return answer; - - } catch (Exception e) { - s_logger.error("createVolumeOnKVMHost: Exception sending CreateObjectCommand", e); - return new Answer(null, false, e.toString()); - } - } - - /** - * Creates CloudStack volume based on storage protocol type (NFS or iSCSI). 
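For reference, the protocol dispatch in the reworked createCloudStackVolumeForTypeVolume above reduces to the standalone sketch below. ProtocolPathSketch, the "protocol" key and the lunName/volumeUuid parameters are illustrative stand-ins (the plugin itself works with Constants.PROTOCOL, CloudStackVolume and VolumeInfo), so treat this as a sketch of the decision, not the actual driver code.

// Minimal sketch of the path returned per protocol: iSCSI hands back the LUN name
// created on ONTAP, managed NFS hands back the CloudStack volume UUID (the qcow2
// file name on the shared export). Names here are placeholders, not plugin classes.
import java.util.Map;

public class ProtocolPathSketch {
    enum ProtocolType { NFS, ISCSI }

    static String volumePath(Map<String, String> poolDetails, String lunName, String volumeUuid) {
        ProtocolType protocol = ProtocolType.valueOf(poolDetails.get("protocol").toUpperCase());
        switch (protocol) {
            case ISCSI:
                return lunName;      // block storage: LUN created via ONTAP REST
            case NFS:
                return volumeUuid;   // managed NFS: file is created later by the KVM agent
            default:
                throw new IllegalArgumentException("Unsupported protocol: " + protocol);
        }
    }

    public static void main(String[] args) {
        System.out.println(volumePath(Map.of("protocol", "nfs"), "/vol/pool1/lun0", "a1b2c3d4-uuid"));
    }
}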
- * - * For Managed NFS (Option 2 Implementation): - * - Creates ONTAP volume and sets metadata in CloudStack DB - * - Sends CreateObjectCommand to KVM host to create qcow2 file using qemu-img - * - ONTAP volume provides the backing NFS storage - * - * For iSCSI/Block Storage: - * - Creates LUN via ONTAP REST API - * - Returns LUN path for direct attachment - */ - private String createCloudStackVolumeForTypeVolume(DataStore dataStore, DataObject dataObject) { + private String createCloudStackVolumeForTypeVolume(DataStore dataStore, VolumeInfo volumeObject) { StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); if(storagePool == null) { - s_logger.error("createCloudStackVolumeForTypeVolume: Storage Pool not found for id: {}", dataStore.getId()); - throw new CloudRuntimeException("createCloudStackVolumeForTypeVolume: Storage Pool not found for id: " + dataStore.getId()); + s_logger.error("createCloudStackVolume : Storage Pool not found for id: " + dataStore.getId()); + throw new CloudRuntimeException("createCloudStackVolume : Storage Pool not found for id: " + dataStore.getId()); } - Map details = storagePoolDetailsDao.listDetailsKeyPairs(dataStore.getId()); - String protocol = details.get(Constants.PROTOCOL); - - if (ProtocolType.NFS.name().equalsIgnoreCase(protocol)) { - // Step 1: Create ONTAP volume and set metadata - String volumeUuid = createManagedNfsVolume(dataStore, dataObject, storagePool); - - // Step 2: Send command to KVM host to create qcow2 file using qemu-img - VolumeInfo volumeInfo = (VolumeInfo) dataObject; - Answer answer = createVolumeOnKVMHost(volumeInfo); - - if (answer == null || !answer.getResult()) { - String errMsg = answer != null ? answer.getDetails() : "Failed to create qcow2 on KVM host"; - s_logger.error("createCloudStackVolumeForTypeVolume: " + errMsg); - throw new CloudRuntimeException(errMsg); - } - - return volumeUuid; - } else if (ProtocolType.ISCSI.name().equalsIgnoreCase(protocol)) { - return createManagedBlockVolume(dataStore, dataObject, storagePool, details); - } else { - String errMsg = String.format("createCloudStackVolumeForTypeVolume: Unsupported protocol [%s]", protocol); - s_logger.error(errMsg); - throw new CloudRuntimeException(errMsg); - } - } - - /** - * Creates Managed NFS Volume with ONTAP backing storage. - * - * Architecture: 1 CloudStack Storage Pool = 1 ONTAP Volume (shared by all volumes) - * - * Flow: - * 1. createAsync() stores volume metadata and NFS mount point - * 2. Volume attach triggers ManagedNfsStorageAdaptor.connectPhysicalDisk() - * 3. KVM mounts: nfs://nfsServer/junctionPath to /mnt/volumeUuid - * 4. Libvirt creates qcow2 file via storageVolCreateXML() - * 5. 
File created at: /vol/ontap_volume/volumeUuid (on ONTAP) - * - * Key Details: - * - All volumes in same pool share the same ONTAP volume NFS export - * - Each volume gets separate libvirt mount point: /mnt/ - * - All qcow2 files stored in same ONTAP volume: /vol// - * - volume._iScsiName stores the NFS junction path (pool.path) - * - * @param dataStore CloudStack data store (storage pool) - * @param dataObject Volume data object - * @param storagePool Storage pool VO - * @return Volume UUID (used as filename for qcow2 file) - */ - private String createManagedNfsVolume(DataStore dataStore, DataObject dataObject, StoragePoolVO storagePool) { - VolumeInfo volumeInfo = (VolumeInfo) dataObject; - VolumeVO volume = volumeDao.findById(volumeInfo.getId()); - String volumeUuid = volumeInfo.getUuid(); - - // Get the NFS junction path from storage pool - // This is the path that was set during pool creation (e.g., "/my_pool_volume") - String junctionPath = storagePool.getPath(); - - // Update volume metadata in CloudStack database - // Use NetworkFilesystem (not ManagedNFS) - matches pool type set during pool creation - volume.setPoolType(Storage.StoragePoolType.NetworkFilesystem); - volume.setPoolId(dataStore.getId()); - volume.setPath(volumeUuid); // Filename for qcow2 file - - // For NetworkFilesystem with managed=true, _iScsiName should be set to volume UUID - // This maintains compatibility with managed storage behavior - volume.set_iScsiName(volumeUuid); - - volumeDao.update(volume.getId(), volume); - - s_logger.info("ONTAP Managed NFS Volume Created: uuid={}, path={}, junctionPath={}, format=QCOW2, " + - "pool={}, size={}GB. Libvirt will create qcow2 file at mount time.", - volumeUuid, volumeUuid, junctionPath, storagePool.getName(), - volumeInfo.getSize() / (1024 * 1024 * 1024)); - - // Optional: Prepare ONTAP volume for optimal qcow2 storage (future enhancement) - // prepareOntapVolumeForQcow2Storage(dataStore, volumeInfo); - - return volumeUuid; - } - - /** - * Creates iSCSI/Block volume by calling ONTAP REST API to create a LUN. - * - * For block storage (iSCSI), the storage provider must create the LUN - * before CloudStack can use it. This is different from NFS where the - * hypervisor creates the file. 
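The layout described in these notes (one ONTAP volume per storage pool, one qcow2 file per CloudStack volume, named by the volume UUID) comes down to plain path composition; the junction path and UUID below are placeholders, and the KVM-side mount point, which is only shown truncated above, is deliberately left out.

// Sketch of where a managed-NFS volume's qcow2 file lives on the shared export:
// <pool junction path>/<CloudStack volume UUID>. Values are placeholders.
public class NfsFileLayoutSketch {
    static String qcow2PathOnExport(String junctionPath, String cloudStackVolumeUuid) {
        return junctionPath + "/" + cloudStackVolumeUuid;
    }

    public static void main(String[] args) {
        // e.g. pool "my_pool" exported at /my_pool, volume UUID used as the file name
        System.out.println(qcow2PathOnExport("/my_pool", "a1b2c3d4-volume-uuid"));
    }
}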
- * - * @param dataStore CloudStack data store - * @param dataObject Volume data object - * @param storagePool Storage pool VO - * @param details Storage pool details containing ONTAP connection info - * @return LUN path/name for iSCSI attachment - */ - private String createManagedBlockVolume(DataStore dataStore, DataObject dataObject, - StoragePoolVO storagePool, Map details) { StorageStrategy storageStrategy = getStrategyByStoragePoolDetails(details); - - s_logger.info("createManagedBlockVolume: Creating iSCSI LUN on ONTAP SVM [{}]", details.get(Constants.SVM_NAME)); - - CloudStackVolume cloudStackVolumeRequest = Utility.createCloudStackVolumeRequestByProtocol(storagePool, details, (VolumeInfo) dataObject); - + s_logger.info("createCloudStackVolumeForTypeVolume: Connection to Ontap SVM [{}] successful, preparing CloudStackVolumeRequest", details.get(Constants.SVM_NAME)); + CloudStackVolume cloudStackVolumeRequest = Utility.createCloudStackVolumeRequestByProtocol(storagePool, details, volumeObject); CloudStackVolume cloudStackVolume = storageStrategy.createCloudStackVolume(cloudStackVolumeRequest); - - if (cloudStackVolume.getLun() != null && cloudStackVolume.getLun().getName() != null) { - String lunPath = cloudStackVolume.getLun().getName(); - s_logger.info("createManagedBlockVolume: iSCSI LUN created successfully: {}", lunPath); - return lunPath; + if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL)) && cloudStackVolume.getLun() != null && cloudStackVolume.getLun().getName() != null) { + return cloudStackVolume.getLun().getName(); + } else if (ProtocolType.NFS.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { + return volumeObject.getUuid(); // return the volume UUID for agent as path for mounting } else { - String errMsg = String.format("createManagedBlockVolume: LUN creation failed for volume [%s]. " + - "LUN or LUN path is null.", dataObject.getUuid()); + String errMsg = "createCloudStackVolumeForTypeVolume: Volume creation failed. Lun or Lun Path is null for dataObject: " + volumeObject; s_logger.error(errMsg); throw new CloudRuntimeException(errMsg); } } - /** - * Optional: Prepares ONTAP volume for optimal qcow2 file storage. - * - * Future enhancements can include: - * - Enable compression for qcow2 files - * - Set QoS policies - * - Enable deduplication - * - Configure snapshot policies - * - * This is a placeholder for ONTAP-specific optimizations. 
- */ - private void prepareOntapVolumeForQcow2Storage(DataStore dataStore, VolumeInfo volumeInfo) { - // TODO: Implement ONTAP volume optimizations - // Examples: - // - storageStrategy.enableCompression(volumePath) - // - storageStrategy.setQosPolicy(volumePath, iops) - // - storageStrategy.enableDeduplication(volumePath) - s_logger.debug("prepareOntapVolumeForQcow2Storage: Placeholder for future ONTAP optimizations"); - } - @Override public void deleteAsync(DataStore store, DataObject data, AsyncCompletionCallback callback) { CommandResult commandResult = new CommandResult(); @@ -379,6 +157,10 @@ public void deleteAsync(DataStore store, DataObject data, AsyncCompletionCallbac } if (data.getType() == DataObjectType.VOLUME) { StoragePoolVO storagePool = storagePoolDao.findById(store.getId()); + if(storagePool == null) { + s_logger.error("deleteAsync : Storage Pool not found for id: " + store.getId()); + throw new CloudRuntimeException("deleteAsync : Storage Pool not found for id: " + store.getId()); + } Map details = storagePoolDetailsDao.listDetailsKeyPairs(store.getId()); if (ProtocolType.NFS.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { // ManagedNFS qcow2 backing file deletion handled by KVM host/libvirt; nothing to do via ONTAP REST. @@ -535,4 +317,5 @@ private StorageStrategy getStrategyByStoragePoolDetails(Map deta throw new CloudRuntimeException("getStrategyByStoragePoolDetails: Connection to Ontap SVM [" + details.get(Constants.SVM_NAME) + "] failed"); } } + } \ No newline at end of file diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java index e9c504e8de71..a1babad7ce09 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java @@ -144,16 +144,9 @@ public Object decode(Response response, Type type) throws IOException, DecodeExc try (InputStream bodyStream = response.body().asInputStream()) { json = new String(bodyStream.readAllBytes(), StandardCharsets.UTF_8); logger.debug("Decoding JSON response: {}", json); - logger.debug("Target type: {}", type); - logger.debug("About to call jsonMapper.readValue()..."); - Object result = null; try { - logger.debug("Calling jsonMapper.constructType()..."); var javaType = jsonMapper.getTypeFactory().constructType(type); - logger.debug("constructType() returned: {}", javaType); - - logger.debug("Calling jsonMapper.readValue() with json and javaType..."); result = jsonMapper.readValue(json, javaType); logger.debug("jsonMapper.readValue() completed successfully"); } catch (Throwable ex) { diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index 6105fdbe3a5b..e16f991b32e1 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -192,12 +192,7 @@ public DataStore initialize(Map dsInfos) { ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL)); switch (protocol) { case NFS: - // Use 
NetworkFilesystem (not ManagedNFS) with managed=true - // This routes to LibvirtStorageAdaptor which has full qemu-img support - // Same pattern as CloudByte/Elastistor plugin parameters.setType(Storage.StoragePoolType.NetworkFilesystem); - // Path should be just the NFS export path (junction path), NOT host:path - // CloudStack will construct the full mount path as: hostAddress + ":" + path path = "/" + storagePoolName; s_logger.info("Setting NFS path for storage pool: " + path); host = "10.193.192.136"; // TODO hardcoded for now @@ -233,9 +228,7 @@ public DataStore initialize(Map dsInfos) { s_logger.error("createStorageVolume returned null for volume: " + storagePoolName); throw new CloudRuntimeException("Failed to create ONTAP volume: " + storagePoolName); } - s_logger.info("Volume object retrieved successfully. UUID: " + volume.getUuid() + ", Name: " + volume.getName()); - details.putIfAbsent(Constants.VOLUME_UUID, volume.getUuid()); details.putIfAbsent(Constants.VOLUME_NAME, volume.getName()); } catch (Exception e) { @@ -246,10 +239,6 @@ public DataStore initialize(Map dsInfos) { throw new CloudRuntimeException("ONTAP details validation failed, cannot create primary storage"); } - // Add mountpoint detail for ManagedNFS - required by KVM agent's ManagedNfsStorageAdaptor - // The 'mountpoint' key is used by connectPhysicalDisk() to mount NFS export - details.put("mountpoint", path); - // Set parameters for primary data store parameters.setPort(Constants.ONTAP_PORT); parameters.setHost(host); @@ -293,7 +282,6 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { logger.debug("attachCluster: Attaching the pool to each of the host in the cluster: {}", primaryStore.getClusterId()); for (HostVO host : hostsToConnect) { - // TODO: Fetch the host IQN and add to the initiator group on ONTAP cluster try { _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { @@ -330,7 +318,6 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper strategy.createAccessGroup(accessGroupRequest); for (HostVO host : hostsToConnect) { - // TODO: Fetch the host IQN and add to the initiator group on ONTAP cluster try { _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java index 5b39e2ce5293..34067873bdf9 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java @@ -38,10 +38,6 @@ import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; import com.cloud.host.dao.HostDao; -/** - * HypervisorHostListener implementation for ONTAP storage. - * Handles connecting/disconnecting hosts to/from ONTAP-backed storage pools. 
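In the initialize() changes above the pool records only the host address and the junction path ("/" + storagePoolName); the hypervisor side later joins them into the familiar host:path NFS source. A minimal sketch follows, with the pool name and the 10.193.192.136 address kept purely as placeholders.

// Sketch of how the stored pool parameters translate into an NFS mount source.
// CloudStack stores host and path separately; the agent combines them as host:path.
public class NfsMountSourceSketch {
    static String mountSource(String hostAddress, String junctionPath) {
        return hostAddress + ":" + junctionPath;   // e.g. 10.193.192.136:/my_pool
    }

    public static void main(String[] args) {
        String storagePoolName = "my_pool";        // placeholder pool name
        String path = "/" + storagePoolName;       // junction path, as set in initialize()
        System.out.println(mountSource("10.193.192.136", path));
    }
}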
- */ public class OntapHostListener implements HypervisorHostListener { protected Logger logger = LogManager.getLogger(getClass()); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java index b11c60e63385..561a6e52e6ac 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java @@ -150,23 +150,15 @@ public Volume createStorageVolume(String volumeName, Long size) { Volume volumeRequest = new Volume(); Svm svm = new Svm(); svm.setName(svmName); - Nas nas = new Nas(); - nas.setPath("/" + volumeName); + nas.setPath(Constants.PATH_SEPARATOR + volumeName); volumeRequest.setName(volumeName); volumeRequest.setSvm(svm); volumeRequest.setAggregates(aggregates); volumeRequest.setSize(size); - volumeRequest.setNas(nas); // be default if we don't set path , ONTAP create a volume with mount/junction path // TODO check if we need to append svm name or not - // since storage pool also cannot be duplicate so junction path can also be not duplicate so /volumeName will always be unique - // Make the POST API call to create the volume + volumeRequest.setNas(nas); try { - /* - ONTAP created a default rule of 0.0.0.0 if no export rule are defined while creating volume - and since in storage pool creation, cloudstack is not aware of the host , we can either create default or - permissive rule and later update it as part of attachCluster or attachZone implementation - */ JobResponse jobResponse = volumeFeignClient.createVolumeWithJob(authHeader, volumeRequest); if (jobResponse == null || jobResponse.getJob() == null) { throw new CloudRuntimeException("Failed to initiate volume creation for " + volumeName); @@ -201,7 +193,6 @@ public Volume createStorageVolume(String volumeName, Long size) { throw new CloudRuntimeException("Failed to create volume: " + e.getMessage()); } s_logger.info("Volume created successfully: " + volumeName); - // Below code is to update volume uuid to storage pool mapping once and used for all other workflow saving get volume call try { Map queryParams = Map.of(Constants.NAME, volumeName); s_logger.debug("Fetching volume details for: " + volumeName); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java index de5dd9ffbe34..9ecbfecceb40 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java @@ -19,10 +19,17 @@ package org.apache.cloudstack.storage.service; +import com.cloud.agent.api.Answer; import com.cloud.host.HostVO; +import com.cloud.storage.Storage; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.exception.CloudRuntimeException; import feign.FeignException; -import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import 
org.apache.cloudstack.storage.command.CreateObjectCommand; import org.apache.cloudstack.storage.feign.FeignClientFactory; import org.apache.cloudstack.storage.feign.client.JobFeignClient; import org.apache.cloudstack.storage.feign.client.NASFeignClient; @@ -56,15 +63,13 @@ public class UnifiedNASStrategy extends NASStrategy { private final NASFeignClient nasFeignClient; private final VolumeFeignClient volumeFeignClient; private final JobFeignClient jobFeignClient; - @Inject - private StoragePoolDetailsDao storagePoolDetailsDao; + @Inject private VolumeDao volumeDao; + @Inject private EndPointSelector epSelector; public UnifiedNASStrategy(OntapStorage ontapStorage) { super(ontapStorage); String baseURL = Constants.HTTPS + ontapStorage.getManagementLIF(); - // Initialize FeignClientFactory and create NAS client this.feignClientFactory = new FeignClientFactory(); - // NAS client uses export policy API endpoint this.nasFeignClient = feignClientFactory.createClient(NASFeignClient.class, baseURL); this.volumeFeignClient = feignClientFactory.createClient(VolumeFeignClient.class,baseURL ); this.jobFeignClient = feignClientFactory.createClient(JobFeignClient.class, baseURL ); @@ -77,23 +82,16 @@ public void setOntapStorage(OntapStorage ontapStorage) { @Override public CloudStackVolume createCloudStackVolume(CloudStackVolume cloudstackVolume) { s_logger.info("createCloudStackVolume: Create cloudstack volume " + cloudstackVolume); - // Skip ontap file creation for now -// try { -// boolean created = createFile(cloudstackVolume.getVolume().getUuid(),cloudstackVolume.getCloudstackVolName(), cloudstackVolume.getFile()); -// if(created){ -// s_logger.debug("Successfully created file in ONTAP under volume with path {} or name {} ", cloudstackVolume.getVolume().getUuid(), cloudstackVolume.getCloudstackVolName()); -// FileInfo responseFile = cloudstackVolume.getFile(); -// responseFile.setPath(cloudstackVolume.getCloudstackVolName()); -// }else { -// s_logger.error("File not created for volume {}", cloudstackVolume.getVolume().getUuid()); -// throw new CloudRuntimeException("File not created"); -// } -// -// }catch (Exception e) { -// s_logger.error("Exception occurred while creating file or dir: {}. Exception: {}", cloudstackVolume.getCloudstackVolName(), e.getMessage()); -// throw new CloudRuntimeException("Failed to create file: " + e.getMessage()); -// } - return cloudstackVolume; + // Step 1: set cloudstack volume metadata + String volumeUuid = updateCloudStackVolumeMetadata(cloudstackVolume.getDatastoreId(), cloudstackVolume.getVolumeInfo()); + // Step 2: Send command to KVM host to create qcow2 file using qemu-img + Answer answer = createVolumeOnKVMHost(cloudstackVolume.getVolumeInfo()); + if (answer == null || !answer.getResult()) { + String errMsg = answer != null ? 
answer.getDetails() : "Failed to create qcow2 on KVM host"; + s_logger.error("createCloudStackVolumeForTypeVolume: " + errMsg); + throw new CloudRuntimeException(errMsg); + } + return cloudstackVolume; } @Override @@ -115,7 +113,7 @@ CloudStackVolume getCloudStackVolume(CloudStackVolume cloudstackVolume) { @Override public AccessGroup createAccessGroup(AccessGroup accessGroup) { - + s_logger.info("createAccessGroup: Create access group {}: " , accessGroup); Map details = accessGroup.getPrimaryDataStoreInfo().getDetails(); String svmName = details.get(Constants.SVM_NAME); String volumeUUID = details.get(Constants.VOLUME_UUID); @@ -124,11 +122,10 @@ public AccessGroup createAccessGroup(AccessGroup accessGroup) { // Create the export policy ExportPolicy policyRequest = createExportPolicyRequest(accessGroup,svmName,volumeName); try { - createExportPolicy(svmName, policyRequest); - s_logger.info("ExportPolicy created: {}, now attaching this policy to storage pool volume", policyRequest.getName()); - + ExportPolicy createdPolicy = createExportPolicy(svmName, policyRequest); + s_logger.info("ExportPolicy created: {}, now attaching this policy to storage pool volume", createdPolicy.getName()); // attach export policy to volume of storage pool - assignExportPolicyToVolume(volumeUUID,policyRequest.getName()); + assignExportPolicyToVolume(volumeUUID,createdPolicy.getName()); s_logger.info("Successfully assigned exportPolicy {} to volume {}", policyRequest.getName(), volumeName); accessGroup.setPolicy(policyRequest); return accessGroup; @@ -166,15 +163,16 @@ void disableLogicalAccess(Map values) { } - private void createExportPolicy(String svmName, ExportPolicy policy) { + private ExportPolicy createExportPolicy(String svmName, ExportPolicy policy) { s_logger.info("Creating export policy: {} for SVM: {}", policy, svmName); try { String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); nasFeignClient.createExportPolicy(authHeader, policy); + OntapResponse policiesResponse = null; try { Map queryParams = Map.of(Constants.NAME, policy.getName()); - OntapResponse policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader, queryParams); + policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader, queryParams); if (policiesResponse == null || policiesResponse.getRecords().isEmpty()) { throw new CloudRuntimeException("Export policy " + policy.getName() + " was not created on ONTAP. 
" + "Received successful response but policy does not exist."); @@ -185,6 +183,7 @@ private void createExportPolicy(String svmName, ExportPolicy policy) { throw new CloudRuntimeException("Export policy creation verification failed: " + e.getMessage()); } s_logger.info("Export policy created successfully with name {}", policy.getName()); + return policiesResponse.getRecords().get(0); } catch (FeignException e) { s_logger.error("Failed to create export policy: {}", policy, e); throw new CloudRuntimeException("Failed to create export policy: " + e.getMessage()); @@ -213,23 +212,11 @@ private void deleteExportPolicy(String svmName, String policyName) { } } - - private String addExportRule(String policyName, String clientMatch, String[] protocols, String[] roRule, String[] rwRule) { - return ""; - } - private void assignExportPolicyToVolume(String volumeUuid, String policyName) { s_logger.info("Assigning export policy: {} to volume: {}", policyName, volumeUuid); try { String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); - Map queryParams = Map.of(Constants.NAME, policyName); - OntapResponse policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader, queryParams); - if (policiesResponse == null || policiesResponse.getRecords().isEmpty()) { - s_logger.error("Export policy not found for assigning rule: {}", policyName); - throw new CloudRuntimeException("Export policy not found: " + policyName); - } - // Create Volume update object with NAS configuration Volume volumeUpdate = new Volume(); Nas nas = new Nas(); @@ -239,17 +226,11 @@ private void assignExportPolicyToVolume(String volumeUuid, String policyName) { volumeUpdate.setNas(nas); try { - /* - ONTAP created a default rule of 0.0.0.0 if no export rule are defined while creating volume - and since in storage pool creation, cloudstack is not aware of the host , we can either create default or - permissive rule and later update it as part of attachCluster or attachZone implementation - */ JobResponse jobResponse = volumeFeignClient.updateVolumeRebalancing(authHeader, volumeUuid, volumeUpdate); if (jobResponse == null || jobResponse.getJob() == null) { throw new CloudRuntimeException("Failed to attach policy " + policyName + "to volume " + volumeUuid); } String jobUUID = jobResponse.getJob().getUuid(); - //Create URI for GET Job API int jobRetryCount = 0; Job createVolumeJob = null; @@ -258,7 +239,6 @@ private void assignExportPolicyToVolume(String volumeUuid, String policyName) { s_logger.error("Job to update volume " + volumeUuid + " did not complete within expected time."); throw new CloudRuntimeException("Job to update volume " + volumeUuid + " did not complete within expected time."); } - try { createVolumeJob = jobFeignClient.getJobByUUID(authHeader, jobUUID); if (createVolumeJob == null) { @@ -269,7 +249,6 @@ private void assignExportPolicyToVolume(String volumeUuid, String policyName) { } catch (FeignException.FeignClientException e) { throw new CloudRuntimeException("Failed to fetch job status: " + e.getMessage()); } - jobRetryCount++; Thread.sleep(Constants.CREATE_VOLUME_CHECK_SLEEP_TIME); // Sleep for 2 seconds before polling again } @@ -277,7 +256,6 @@ private void assignExportPolicyToVolume(String volumeUuid, String policyName) { s_logger.error("Exception while updating volume: ", e); throw new CloudRuntimeException("Failed to update volume: " + e.getMessage()); } - s_logger.info("Export policy successfully assigned to volume: {}", volumeUuid); } catch (FeignException e) { 
s_logger.error("Failed to assign export policy to volume: {}", volumeUuid, e); @@ -290,7 +268,6 @@ private void assignExportPolicyToVolume(String volumeUuid, String policyName) { private boolean createFile(String volumeUuid, String filePath, FileInfo fileInfo) { s_logger.info("Creating file: {} in volume: {}", filePath, volumeUuid); - try { String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); nasFeignClient.createFile(authHeader, volumeUuid, filePath, fileInfo); @@ -307,7 +284,6 @@ private boolean createFile(String volumeUuid, String filePath, FileInfo fileInfo private boolean deleteFile(String volumeUuid, String filePath) { s_logger.info("Deleting file: {} from volume: {}", filePath, volumeUuid); - try { String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); nasFeignClient.deleteFile(authHeader, volumeUuid, filePath); @@ -324,7 +300,6 @@ private boolean deleteFile(String volumeUuid, String filePath) { private OntapResponse getFileInfo(String volumeUuid, String filePath) { s_logger.debug("Getting file info for: {} in volume: {}", filePath, volumeUuid); - try { String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); OntapResponse response = nasFeignClient.getFileResponse(authHeader, volumeUuid, filePath); @@ -345,7 +320,6 @@ private OntapResponse getFileInfo(String volumeUuid, String filePath) private boolean updateFile(String volumeUuid, String filePath, FileInfo fileInfo) { s_logger.info("Updating file: {} in volume: {}", filePath, volumeUuid); - try { String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); nasFeignClient.updateFile( authHeader, volumeUuid, filePath, fileInfo); @@ -386,9 +360,9 @@ private ExportPolicy createExportPolicyRequest(AccessGroup accessGroup,String sv } exportRule.setClients(exportClients); exportRule.setProtocols(List.of(ExportRule.ProtocolsEnum.any)); - exportRule.setRoRule(List.of("sys")); // Use sys (Unix UID/GID) authentication for NFS - exportRule.setRwRule(List.of("sys")); // Use sys (Unix UID/GID) authentication for NFS - exportRule.setSuperuser(List.of("sys")); // Allow root/superuser access with sys auth + exportRule.setRoRule(List.of("sys")); + exportRule.setRwRule(List.of("sys")); + exportRule.setSuperuser(List.of("sys")); rules.add(exportRule); Svm svm = new Svm(); @@ -399,4 +373,46 @@ private ExportPolicy createExportPolicyRequest(AccessGroup accessGroup,String sv return exportPolicy; } + + private String updateCloudStackVolumeMetadata(String dataStoreId, VolumeInfo volumeInfo) { + s_logger.info("createManagedNfsVolume called with datastoreID: {} volumeInfo: {} ", dataStoreId, volumeInfo ); + VolumeVO volume = volumeDao.findById(volumeInfo.getId()); + String volumeUuid = volumeInfo.getUuid(); + volume.setPoolType(Storage.StoragePoolType.NetworkFilesystem); + volume.setPoolId(Long.parseLong(dataStoreId)); //need to check if volume0 already has this data filled + volume.setPath(volumeUuid); // Filename for qcow2 file + volumeDao.update(volume.getId(), volume); + return volumeUuid; + } + + private Answer createVolumeOnKVMHost(VolumeInfo volumeInfo) { + s_logger.info("createVolumeOnKVMHost called with volumeInfo: {} ", volumeInfo); + + try { + s_logger.info("createVolumeOnKVMHost: Sending CreateObjectCommand to KVM agent for volume: {}", volumeInfo.getUuid()); + // Create command with volume TO (Transfer Object) + CreateObjectCommand cmd = new CreateObjectCommand(volumeInfo.getTO()); + // Select 
endpoint (KVM agent) to send command + // epSelector will find an appropriate KVM host in the cluster/pod + EndPoint ep = epSelector.select(volumeInfo); + if (ep == null) { + String errMsg = "No remote endpoint to send CreateObjectCommand, check if host is up"; + s_logger.error(errMsg); + return new Answer(cmd, false, errMsg); + } + s_logger.info("createVolumeOnKVMHost: Sending command to endpoint: {}", ep.getHostAddr()); + // Send command to KVM agent and wait for response + Answer answer = ep.sendMessage(cmd); + if (answer != null && answer.getResult()) { + s_logger.info("createVolumeOnKVMHost: Successfully created qcow2 file on KVM host"); + } else { + s_logger.error("createVolumeOnKVMHost: Failed to create qcow2 file: {}", + answer != null ? answer.getDetails() : "null answer"); + } + return answer; + } catch (Exception e) { + s_logger.error("createVolumeOnKVMHost: Exception sending CreateObjectCommand", e); + return new Answer(null, false, e.toString()); + } + } } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java index 694c4a2c126f..269c3d9a1d17 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java @@ -19,6 +19,9 @@ package org.apache.cloudstack.storage.service.model; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.storage.feign.model.FileInfo; import org.apache.cloudstack.storage.feign.model.Lun; import org.apache.cloudstack.storage.feign.model.Volume; @@ -27,10 +30,8 @@ public class CloudStackVolume { private FileInfo file; private Lun lun; - private Volume volume; - // will be replaced after testing - private String cloudstackVolName; - + private String datastoreId; + private VolumeInfo volumeInfo; public FileInfo getFile() { return file; } @@ -46,16 +47,16 @@ public Lun getLun() { public void setLun(Lun lun) { this.lun = lun; } - public Volume getVolume() { - return volume; + public String getDatastoreId() { + return datastoreId; } - public void setVolume(Volume volume) { - this.volume = volume; + public void setDatastoreId(String datastoreId) { + this.datastoreId = datastoreId; } - public String getCloudstackVolName() { - return cloudstackVolName; + public VolumeInfo getVolumeInfo() { + return volumeInfo; } - public void setCloudstackVolName(String cloudstackVolName) { - this.cloudstackVolName = cloudstackVolName; + public void setVolumeInfo(VolumeInfo volumeInfot) { + this.volumeInfo = volumeInfot; } } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java index b1f50ee513be..c846302317f1 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java @@ -58,20 +58,6 @@ public static String generateAuthHeader (String username, String password) { return BASIC + StringUtils.SPACE + new String(encodedBytes); } - /** - * Creates CloudStackVolume request 
object for ONTAP REST API calls. - * - * IMPORTANT: For Managed NFS (Option 2 Implementation): - * - The NFS case below is DEPRECATED and NOT USED - * - OntapPrimaryDatastoreDriver.createManagedNfsVolume() handles NFS volumes - * - It returns UUID only without creating files (KVM creates qcow2 automatically) - * - This method is ONLY used for iSCSI/block storage volumes - * - * @param storagePool Storage pool information - * @param details Storage pool details with ONTAP connection info - * @param volumeObject Volume information - * @return CloudStackVolume request for ONTAP REST API - */ public static CloudStackVolume createCloudStackVolumeRequestByProtocol(StoragePoolVO storagePool, Map details, VolumeInfo volumeObject) { CloudStackVolume cloudStackVolumeRequest = null; @@ -79,24 +65,9 @@ public static CloudStackVolume createCloudStackVolumeRequestByProtocol(StoragePo ProtocolType protocolType = ProtocolType.valueOf(protocol); switch (protocolType) { case NFS: - // DEPRECATED: This NFS case is NOT USED in Option 2 Implementation - // For Managed NFS, OntapPrimaryDatastoreDriver.createManagedNfsVolume() - // returns UUID only and lets KVM create qcow2 files automatically. - // This legacy code remains for reference but is bypassed in current implementation. - s_logger.warn("createCloudStackVolumeRequestByProtocol: NFS case should not be called. " + - "Use OntapPrimaryDatastoreDriver.createManagedNfsVolume() instead."); cloudStackVolumeRequest = new CloudStackVolume(); - FileInfo file = new FileInfo(); - file.setSize(volumeObject.getSize()); - file.setUnixPermissions(755); - file.setType(FileInfo.TypeEnum.FILE); - - Volume poolVolume = new Volume(); - poolVolume.setName(details.get(Constants.VOLUME_NAME)); - poolVolume.setUuid(details.get(Constants.VOLUME_UUID)); - cloudStackVolumeRequest.setVolume(poolVolume); - cloudStackVolumeRequest.setFile(file); - cloudStackVolumeRequest.setCloudstackVolName(volumeObject.getName()); + cloudStackVolumeRequest.setDatastoreId(String.valueOf(storagePool.getId())); + cloudStackVolumeRequest.setVolumeInfo(volumeObject); break; case ISCSI: cloudStackVolumeRequest = new CloudStackVolume(); From 4ad6c71e02a8196ba8583bef0206b0cd51fa2b5c Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Thu, 4 Dec 2025 19:35:05 +0530 Subject: [PATCH 19/29] latest all --- .../java/com/cloud/template/TemplateManager.java | 0 engine/schema/pom.xml | 0 .../com/cloud/network/as/dao/CounterDaoImpl.java | 0 .../java/com/cloud/network/dao/NetworkDaoImpl.java | 0 .../java/com/cloud/storage/dao/VolumeDaoImpl.java | 0 engine/storage/object/pom.xml | 0 .../storage/snapshot/DefaultSnapshotStrategy.java | 0 .../cloudstack/metrics/MetricsServiceImplTest.java | 0 .../network-elements/elastic-loadbalancer/pom.xml | 0 .../driver/LinstorPrimaryDataStoreDriverImpl.java | 0 .../storage/secondary/cloud-install-sys-tmplt.py | 0 .../src/main/java/com/cloud/vm/UserVmManager.java | 0 systemvm/agent/conf/log4j-cloud.xml | 0 systemvm/agent/images/clr_button_hover.gif | Bin systemvm/agent/noVNC/app/images/expander.png | Bin systemvm/agent/noVNC/app/locale/es.json | 0 systemvm/agent/noVNC/app/locale/ja.json | 0 systemvm/agent/noVNC/app/sounds/CREDITS | 0 systemvm/agent/noVNC/core/util/events.js | 0 systemvm/agent/noVNC/core/websock.js | 0 .../agent/noVNC/vendor/pako/lib/zlib/adler32.js | 0 systemvm/agent/noVNC/vnc.html | 0 tools/apidoc/pom.xml | 0 23 files changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 engine/components-api/src/main/java/com/cloud/template/TemplateManager.java 
mode change 100644 => 100755 engine/schema/pom.xml mode change 100644 => 100755 engine/schema/src/main/java/com/cloud/network/as/dao/CounterDaoImpl.java mode change 100644 => 100755 engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java mode change 100644 => 100755 engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java mode change 100644 => 100755 engine/storage/object/pom.xml mode change 100644 => 100755 engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java mode change 100644 => 100755 plugins/metrics/src/test/java/org/apache/cloudstack/metrics/MetricsServiceImplTest.java mode change 100644 => 100755 plugins/network-elements/elastic-loadbalancer/pom.xml mode change 100644 => 100755 plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java mode change 100644 => 100755 scripts/storage/secondary/cloud-install-sys-tmplt.py mode change 100644 => 100755 server/src/main/java/com/cloud/vm/UserVmManager.java mode change 100644 => 100755 systemvm/agent/conf/log4j-cloud.xml mode change 100644 => 100755 systemvm/agent/images/clr_button_hover.gif mode change 100644 => 100755 systemvm/agent/noVNC/app/images/expander.png mode change 100644 => 100755 systemvm/agent/noVNC/app/locale/es.json mode change 100644 => 100755 systemvm/agent/noVNC/app/locale/ja.json mode change 100644 => 100755 systemvm/agent/noVNC/app/sounds/CREDITS mode change 100644 => 100755 systemvm/agent/noVNC/core/util/events.js mode change 100644 => 100755 systemvm/agent/noVNC/core/websock.js mode change 100644 => 100755 systemvm/agent/noVNC/vendor/pako/lib/zlib/adler32.js mode change 100644 => 100755 systemvm/agent/noVNC/vnc.html mode change 100644 => 100755 tools/apidoc/pom.xml diff --git a/engine/components-api/src/main/java/com/cloud/template/TemplateManager.java b/engine/components-api/src/main/java/com/cloud/template/TemplateManager.java old mode 100644 new mode 100755 diff --git a/engine/schema/pom.xml b/engine/schema/pom.xml old mode 100644 new mode 100755 diff --git a/engine/schema/src/main/java/com/cloud/network/as/dao/CounterDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/as/dao/CounterDaoImpl.java old mode 100644 new mode 100755 diff --git a/engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java old mode 100644 new mode 100755 diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java old mode 100644 new mode 100755 diff --git a/engine/storage/object/pom.xml b/engine/storage/object/pom.xml old mode 100644 new mode 100755 diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java old mode 100644 new mode 100755 diff --git a/plugins/metrics/src/test/java/org/apache/cloudstack/metrics/MetricsServiceImplTest.java b/plugins/metrics/src/test/java/org/apache/cloudstack/metrics/MetricsServiceImplTest.java old mode 100644 new mode 100755 diff --git a/plugins/network-elements/elastic-loadbalancer/pom.xml b/plugins/network-elements/elastic-loadbalancer/pom.xml old mode 100644 new mode 100755 diff --git 
a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java old mode 100644 new mode 100755 diff --git a/scripts/storage/secondary/cloud-install-sys-tmplt.py b/scripts/storage/secondary/cloud-install-sys-tmplt.py old mode 100644 new mode 100755 diff --git a/server/src/main/java/com/cloud/vm/UserVmManager.java b/server/src/main/java/com/cloud/vm/UserVmManager.java old mode 100644 new mode 100755 diff --git a/systemvm/agent/conf/log4j-cloud.xml b/systemvm/agent/conf/log4j-cloud.xml old mode 100644 new mode 100755 diff --git a/systemvm/agent/images/clr_button_hover.gif b/systemvm/agent/images/clr_button_hover.gif old mode 100644 new mode 100755 diff --git a/systemvm/agent/noVNC/app/images/expander.png b/systemvm/agent/noVNC/app/images/expander.png old mode 100644 new mode 100755 diff --git a/systemvm/agent/noVNC/app/locale/es.json b/systemvm/agent/noVNC/app/locale/es.json old mode 100644 new mode 100755 diff --git a/systemvm/agent/noVNC/app/locale/ja.json b/systemvm/agent/noVNC/app/locale/ja.json old mode 100644 new mode 100755 diff --git a/systemvm/agent/noVNC/app/sounds/CREDITS b/systemvm/agent/noVNC/app/sounds/CREDITS old mode 100644 new mode 100755 diff --git a/systemvm/agent/noVNC/core/util/events.js b/systemvm/agent/noVNC/core/util/events.js old mode 100644 new mode 100755 diff --git a/systemvm/agent/noVNC/core/websock.js b/systemvm/agent/noVNC/core/websock.js old mode 100644 new mode 100755 diff --git a/systemvm/agent/noVNC/vendor/pako/lib/zlib/adler32.js b/systemvm/agent/noVNC/vendor/pako/lib/zlib/adler32.js old mode 100644 new mode 100755 diff --git a/systemvm/agent/noVNC/vnc.html b/systemvm/agent/noVNC/vnc.html old mode 100644 new mode 100755 diff --git a/tools/apidoc/pom.xml b/tools/apidoc/pom.xml old mode 100644 new mode 100755 From 000cc1d756c89f2d158343d6269794c20e03e160 Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Thu, 4 Dec 2025 19:47:33 +0530 Subject: [PATCH 20/29] removed unrelated files --- .../java/com/cloud/template/TemplateManager.java | 0 engine/schema/pom.xml | 0 .../com/cloud/network/as/dao/CounterDaoImpl.java | 0 .../java/com/cloud/network/dao/NetworkDaoImpl.java | 0 .../java/com/cloud/storage/dao/VolumeDaoImpl.java | 0 engine/storage/object/pom.xml | 0 .../storage/snapshot/DefaultSnapshotStrategy.java | 0 .../cloudstack/metrics/MetricsServiceImplTest.java | 0 .../network-elements/elastic-loadbalancer/pom.xml | 0 .../driver/LinstorPrimaryDataStoreDriverImpl.java | 0 .../cloudstack/storage/feign/model/FileInfo.java | 0 .../cloudstack/storage/feign/model/LunMap.java | 0 .../apache/cloudstack/storage/feign/model/Qos.java | 0 .../storage/secondary/cloud-install-sys-tmplt.py | 0 .../src/main/java/com/cloud/vm/UserVmManager.java | 0 systemvm/agent/conf/log4j-cloud.xml | 0 systemvm/agent/images/clr_button_hover.gif | Bin systemvm/agent/noVNC/app/images/expander.png | Bin systemvm/agent/noVNC/app/locale/es.json | 0 systemvm/agent/noVNC/app/locale/ja.json | 0 systemvm/agent/noVNC/app/sounds/CREDITS | 0 systemvm/agent/noVNC/core/util/events.js | 0 systemvm/agent/noVNC/core/websock.js | 0 .../agent/noVNC/vendor/pako/lib/zlib/adler32.js | 0 systemvm/agent/noVNC/vnc.html | 0 tools/apidoc/pom.xml | 0 26 files changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 engine/components-api/src/main/java/com/cloud/template/TemplateManager.java mode change 
100755 => 100644 engine/schema/pom.xml mode change 100755 => 100644 engine/schema/src/main/java/com/cloud/network/as/dao/CounterDaoImpl.java mode change 100755 => 100644 engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java mode change 100755 => 100644 engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java mode change 100755 => 100644 engine/storage/object/pom.xml mode change 100755 => 100644 engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java mode change 100755 => 100644 plugins/metrics/src/test/java/org/apache/cloudstack/metrics/MetricsServiceImplTest.java mode change 100755 => 100644 plugins/network-elements/elastic-loadbalancer/pom.xml mode change 100755 => 100644 plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java mode change 100755 => 100644 plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/FileInfo.java mode change 100755 => 100644 plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/LunMap.java mode change 100755 => 100644 plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Qos.java mode change 100755 => 100644 scripts/storage/secondary/cloud-install-sys-tmplt.py mode change 100755 => 100644 server/src/main/java/com/cloud/vm/UserVmManager.java mode change 100755 => 100644 systemvm/agent/conf/log4j-cloud.xml mode change 100755 => 100644 systemvm/agent/images/clr_button_hover.gif mode change 100755 => 100644 systemvm/agent/noVNC/app/images/expander.png mode change 100755 => 100644 systemvm/agent/noVNC/app/locale/es.json mode change 100755 => 100644 systemvm/agent/noVNC/app/locale/ja.json mode change 100755 => 100644 systemvm/agent/noVNC/app/sounds/CREDITS mode change 100755 => 100644 systemvm/agent/noVNC/core/util/events.js mode change 100755 => 100644 systemvm/agent/noVNC/core/websock.js mode change 100755 => 100644 systemvm/agent/noVNC/vendor/pako/lib/zlib/adler32.js mode change 100755 => 100644 systemvm/agent/noVNC/vnc.html mode change 100755 => 100644 tools/apidoc/pom.xml diff --git a/engine/components-api/src/main/java/com/cloud/template/TemplateManager.java b/engine/components-api/src/main/java/com/cloud/template/TemplateManager.java old mode 100755 new mode 100644 diff --git a/engine/schema/pom.xml b/engine/schema/pom.xml old mode 100755 new mode 100644 diff --git a/engine/schema/src/main/java/com/cloud/network/as/dao/CounterDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/as/dao/CounterDaoImpl.java old mode 100755 new mode 100644 diff --git a/engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java old mode 100755 new mode 100644 diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java old mode 100755 new mode 100644 diff --git a/engine/storage/object/pom.xml b/engine/storage/object/pom.xml old mode 100755 new mode 100644 diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java old mode 100755 new mode 100644 diff --git a/plugins/metrics/src/test/java/org/apache/cloudstack/metrics/MetricsServiceImplTest.java 
b/plugins/metrics/src/test/java/org/apache/cloudstack/metrics/MetricsServiceImplTest.java old mode 100755 new mode 100644 diff --git a/plugins/network-elements/elastic-loadbalancer/pom.xml b/plugins/network-elements/elastic-loadbalancer/pom.xml old mode 100755 new mode 100644 diff --git a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java old mode 100755 new mode 100644 diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/FileInfo.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/FileInfo.java old mode 100755 new mode 100644 diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/LunMap.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/LunMap.java old mode 100755 new mode 100644 diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Qos.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Qos.java old mode 100755 new mode 100644 diff --git a/scripts/storage/secondary/cloud-install-sys-tmplt.py b/scripts/storage/secondary/cloud-install-sys-tmplt.py old mode 100755 new mode 100644 diff --git a/server/src/main/java/com/cloud/vm/UserVmManager.java b/server/src/main/java/com/cloud/vm/UserVmManager.java old mode 100755 new mode 100644 diff --git a/systemvm/agent/conf/log4j-cloud.xml b/systemvm/agent/conf/log4j-cloud.xml old mode 100755 new mode 100644 diff --git a/systemvm/agent/images/clr_button_hover.gif b/systemvm/agent/images/clr_button_hover.gif old mode 100755 new mode 100644 diff --git a/systemvm/agent/noVNC/app/images/expander.png b/systemvm/agent/noVNC/app/images/expander.png old mode 100755 new mode 100644 diff --git a/systemvm/agent/noVNC/app/locale/es.json b/systemvm/agent/noVNC/app/locale/es.json old mode 100755 new mode 100644 diff --git a/systemvm/agent/noVNC/app/locale/ja.json b/systemvm/agent/noVNC/app/locale/ja.json old mode 100755 new mode 100644 diff --git a/systemvm/agent/noVNC/app/sounds/CREDITS b/systemvm/agent/noVNC/app/sounds/CREDITS old mode 100755 new mode 100644 diff --git a/systemvm/agent/noVNC/core/util/events.js b/systemvm/agent/noVNC/core/util/events.js old mode 100755 new mode 100644 diff --git a/systemvm/agent/noVNC/core/websock.js b/systemvm/agent/noVNC/core/websock.js old mode 100755 new mode 100644 diff --git a/systemvm/agent/noVNC/vendor/pako/lib/zlib/adler32.js b/systemvm/agent/noVNC/vendor/pako/lib/zlib/adler32.js old mode 100755 new mode 100644 diff --git a/systemvm/agent/noVNC/vnc.html b/systemvm/agent/noVNC/vnc.html old mode 100755 new mode 100644 diff --git a/tools/apidoc/pom.xml b/tools/apidoc/pom.xml old mode 100755 new mode 100644 From aae07474e682056386d216ca68a3003063ad44bb Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Thu, 4 Dec 2025 21:40:23 +0530 Subject: [PATCH 21/29] more fixes --- .../storage/driver/OntapPrimaryDatastoreDriver.java | 5 +---- .../java/org/apache/cloudstack/storage/feign/model/Svm.java | 2 +- .../cloudstack/storage/service/UnifiedNASStrategy.java | 4 ---- 3 files changed, 2 insertions(+), 9 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java 
b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index b300fade438c..5b1713260f8b 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -65,8 +65,6 @@ public class OntapPrimaryDatastoreDriver implements PrimaryDataStoreDriver { @Inject private StoragePoolDetailsDao storagePoolDetailsDao; @Inject private PrimaryDataStoreDao storagePoolDao; - @Inject private com.cloud.storage.dao.VolumeDao volumeDao; - @Inject private EndPointSelector epSelector; @Override public Map getCapabilities() { @@ -317,5 +315,4 @@ private StorageStrategy getStrategyByStoragePoolDetails(Map deta throw new CloudRuntimeException("getStrategyByStoragePoolDetails: Connection to Ontap SVM [" + details.get(Constants.SVM_NAME) + "] failed"); } } - -} \ No newline at end of file +} diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Svm.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Svm.java index f1a226739365..65821739f1b2 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Svm.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Svm.java @@ -144,4 +144,4 @@ public int hashCode() { @JsonInclude(JsonInclude.Include.NON_NULL) public static class Links { } -} \ No newline at end of file +} diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java index 9ecbfecceb40..d70f894d7d9a 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java @@ -390,10 +390,7 @@ private Answer createVolumeOnKVMHost(VolumeInfo volumeInfo) { try { s_logger.info("createVolumeOnKVMHost: Sending CreateObjectCommand to KVM agent for volume: {}", volumeInfo.getUuid()); - // Create command with volume TO (Transfer Object) CreateObjectCommand cmd = new CreateObjectCommand(volumeInfo.getTO()); - // Select endpoint (KVM agent) to send command - // epSelector will find an appropriate KVM host in the cluster/pod EndPoint ep = epSelector.select(volumeInfo); if (ep == null) { String errMsg = "No remote endpoint to send CreateObjectCommand, check if host is up"; @@ -401,7 +398,6 @@ private Answer createVolumeOnKVMHost(VolumeInfo volumeInfo) { return new Answer(cmd, false, errMsg); } s_logger.info("createVolumeOnKVMHost: Sending command to endpoint: {}", ep.getHostAddr()); - // Send command to KVM agent and wait for response Answer answer = ep.sendMessage(cmd); if (answer != null && answer.getResult()) { s_logger.info("createVolumeOnKVMHost: Successfully created qcow2 file on KVM host"); From 23ddff914aa2feaea92f3aaf64438bfbb8ac6256 Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Thu, 4 Dec 2025 21:58:47 +0530 Subject: [PATCH 22/29] lint fix --- .../cloudstack/storage/feign/client/VolumeFeignClient.java | 1 + .../storage/lifecycle/OntapPrimaryDatastoreLifecycle.java | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git 
a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java index cdb898ad0ae1..6d96ac63d40f 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java @@ -51,3 +51,4 @@ public interface VolumeFeignClient { JobResponse updateVolumeRebalancing(@Param("authHeader") String authHeader, @Param("uuid") String uuid, Volume volumeRequest); } + diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index e16f991b32e1..40d83a7b1121 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -374,4 +374,3 @@ public void changeStoragePoolScopeToCluster(DataStore store, ClusterScope cluste } } - From 0aea9fb184155625cbdf554d513c80f864f377b5 Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Thu, 4 Dec 2025 22:06:58 +0530 Subject: [PATCH 23/29] lint fix 1 --- .../cloudstack/storage/feign/client/VolumeFeignClient.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java index 6d96ac63d40f..4d946adbb124 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java @@ -50,5 +50,3 @@ public interface VolumeFeignClient { @Headers({ "Authorization: {authHeader}"}) JobResponse updateVolumeRebalancing(@Param("authHeader") String authHeader, @Param("uuid") String uuid, Volume volumeRequest); } - - From 409d28f7f8d69572114ff1c7e2becd6a595cf69d Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Thu, 4 Dec 2025 22:54:04 +0530 Subject: [PATCH 24/29] lint fix 2 --- .../cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java | 1 - .../storage/provider/OntapPrimaryDatastoreProvider.java | 0 .../cloudstack/storage/service/model/CloudStackVolume.java | 3 --- .../main/java/org/apache/cloudstack/storage/utils/Utility.java | 2 -- 4 files changed, 6 deletions(-) mode change 100644 => 100755 plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index 5b1713260f8b..82e3351ecb4a 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -22,7 +22,6 @@ import com.cloud.agent.api.to.DataObjectType; import 
com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.DataTO; -import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; import com.cloud.exception.InvalidParameterValueException; import com.cloud.host.Host; import com.cloud.storage.Storage; diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java old mode 100644 new mode 100755 diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java index 269c3d9a1d17..ae5c90c3bd9e 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java @@ -19,12 +19,9 @@ package org.apache.cloudstack.storage.service.model; -import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.storage.feign.model.FileInfo; import org.apache.cloudstack.storage.feign.model.Lun; -import org.apache.cloudstack.storage.feign.model.Volume; public class CloudStackVolume { diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java index c846302317f1..b82befd55eb0 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java @@ -23,12 +23,10 @@ import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.cloudstack.storage.feign.model.FileInfo; import org.apache.cloudstack.storage.feign.model.Lun; import org.apache.cloudstack.storage.feign.model.LunSpace; import org.apache.cloudstack.storage.feign.model.OntapStorage; import org.apache.cloudstack.storage.feign.model.Svm; -import org.apache.cloudstack.storage.feign.model.Volume; import org.apache.cloudstack.storage.provider.StorageProviderFactory; import org.apache.cloudstack.storage.service.StorageStrategy; import org.apache.cloudstack.storage.service.model.CloudStackVolume; From 516b5536011b7f416e29fb068fae0546bb3ab724 Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Fri, 5 Dec 2025 20:23:17 +0530 Subject: [PATCH 25/29] testing and review comments fix --- plugins/storage/volume/ontap/pom.xml | 3 +- .../driver/OntapPrimaryDatastoreDriver.java | 3 +- .../OntapPrimaryDatastoreLifecycle.java | 2 +- .../storage/listener/OntapHostListener.java | 2 +- .../provider/StorageProviderFactory.java | 3 ++ .../storage/service/UnifiedNASStrategy.java | 47 +++++++++++++------ .../service/model/CloudStackVolume.java | 10 ++-- .../cloudstack/storage/utils/Utility.java | 4 +- 8 files changed, 48 insertions(+), 26 deletions(-) diff --git a/plugins/storage/volume/ontap/pom.xml b/plugins/storage/volume/ontap/pom.xml index 5cd012f82f43..afd3af113146 100644 --- 
a/plugins/storage/volume/ontap/pom.xml +++ b/plugins/storage/volume/ontap/pom.xml @@ -35,6 +35,7 @@ 1.6.2 3.8.1 2.22.2 + 2.13.4 @@ -76,7 +77,7 @@ com.fasterxml.jackson.core jackson-databind - 2.13.4 + ${jackson-databind.version} org.apache.httpcomponents diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index 82e3351ecb4a..9e457e873886 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -103,7 +103,8 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet s_logger.info("createAsync: Started for data store [{}] and data object [{}] of type [{}]", dataStore, dataObject, dataObject.getType()); if (dataObject.getType() == DataObjectType.VOLUME) { - path = createCloudStackVolumeForTypeVolume(dataStore, (VolumeInfo)dataObject); + VolumeInfo volumeInfo = (VolumeInfo) dataObject; + path = createCloudStackVolumeForTypeVolume(dataStore, volumeInfo); createCmdResult = new CreateCmdResult(path, new Answer(null, true, null)); } else { errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to createAsync"; diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index 40d83a7b1121..4b3ee1bdfd39 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -195,7 +195,7 @@ public DataStore initialize(Map dsInfos) { parameters.setType(Storage.StoragePoolType.NetworkFilesystem); path = "/" + storagePoolName; s_logger.info("Setting NFS path for storage pool: " + path); - host = "10.193.192.136"; // TODO hardcoded for now + // host = "10.193.192.136"; // TODO hardcoded for now, uncomment and replace it with the data LIF break; case ISCSI: parameters.setType(Storage.StoragePoolType.Iscsi); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java index 34067873bdf9..2a83ceabda9b 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java @@ -57,7 +57,7 @@ public boolean hostConnect(long hostId, long poolId) { logger.info("Connect to host " + hostId + " from pool " + poolId); Host host = _hostDao.findById(hostId); if (host == null) { - logger.error("Failed to add host by HostListener as host was not found with id : {}", hostId); + logger.error("Host not found with id: {}", hostId); return false; } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java index e9448ec16ded..c67cfb76c30c 100644 ---
a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.storage.provider; +import com.cloud.utils.component.ComponentContext; import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.storage.feign.model.OntapStorage; import org.apache.cloudstack.storage.service.StorageStrategy; @@ -39,6 +40,7 @@ public static StorageStrategy getStrategy(OntapStorage ontapStorage) { case NFS: if (!ontapStorage.getIsDisaggregated()) { UnifiedNASStrategy unifiedNASStrategy = new UnifiedNASStrategy(ontapStorage); + ComponentContext.inject(unifiedNASStrategy); unifiedNASStrategy.setOntapStorage(ontapStorage); return unifiedNASStrategy; } @@ -46,6 +48,7 @@ public static StorageStrategy getStrategy(OntapStorage ontapStorage) { case ISCSI: if (!ontapStorage.getIsDisaggregated()) { UnifiedSANStrategy unifiedSANStrategy = new UnifiedSANStrategy(ontapStorage); + ComponentContext.inject(unifiedSANStrategy); unifiedSANStrategy.setOntapStorage(ontapStorage); return unifiedSANStrategy; } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java index d70f894d7d9a..4d6948b1c01f 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java @@ -26,9 +26,9 @@ import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.exception.CloudRuntimeException; import feign.FeignException; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.storage.command.CreateObjectCommand; import org.apache.cloudstack.storage.feign.FeignClientFactory; import org.apache.cloudstack.storage.feign.client.JobFeignClient; @@ -48,6 +48,7 @@ import org.apache.cloudstack.storage.service.model.CloudStackVolume; import org.apache.cloudstack.storage.utils.Constants; import org.apache.cloudstack.storage.utils.Utility; +import org.apache.cloudstack.storage.volume.VolumeObject; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -82,16 +83,21 @@ public void setOntapStorage(OntapStorage ontapStorage) { @Override public CloudStackVolume createCloudStackVolume(CloudStackVolume cloudstackVolume) { s_logger.info("createCloudStackVolume: Create cloudstack volume " + cloudstackVolume); - // Step 1: set cloudstack volume metadata - String volumeUuid = updateCloudStackVolumeMetadata(cloudstackVolume.getDatastoreId(), cloudstackVolume.getVolumeInfo()); - // Step 2: Send command to KVM host to create qcow2 file using qemu-img - Answer answer = createVolumeOnKVMHost(cloudstackVolume.getVolumeInfo()); + try { + // Step 1: set cloudstack volume metadata + String volumeUuid = updateCloudStackVolumeMetadata(cloudstackVolume.getDatastoreId(), cloudstackVolume.getVolumeInfo()); + // Step 2: Send command to KVM host to create qcow2 file using qemu-img + Answer answer = 
createVolumeOnKVMHost(cloudstackVolume.getVolumeInfo()); if (answer == null || !answer.getResult()) { String errMsg = answer != null ? answer.getDetails() : "Failed to create qcow2 on KVM host"; s_logger.error("createCloudStackVolumeForTypeVolume: " + errMsg); throw new CloudRuntimeException(errMsg); } return cloudstackVolume; + } catch (Exception e) { + s_logger.error("createCloudStackVolumeForTypeVolume: error occurred " + e); + throw new CloudRuntimeException(e); + } } @Override @@ -374,18 +380,29 @@ private ExportPolicy createExportPolicyRequest(AccessGroup accessGroup,String sv return exportPolicy; } - private String updateCloudStackVolumeMetadata(String dataStoreId, VolumeInfo volumeInfo) { - s_logger.info("createManagedNfsVolume called with datastoreID: {} volumeInfo: {} ", dataStoreId, volumeInfo ); - VolumeVO volume = volumeDao.findById(volumeInfo.getId()); - String volumeUuid = volumeInfo.getUuid(); - volume.setPoolType(Storage.StoragePoolType.NetworkFilesystem); - volume.setPoolId(Long.parseLong(dataStoreId)); //need to check if volume0 already has this data filled - volume.setPath(volumeUuid); // Filename for qcow2 file - volumeDao.update(volume.getId(), volume); - return volumeUuid; + private String updateCloudStackVolumeMetadata(String dataStoreId, DataObject volumeInfo) { + s_logger.info("updateCloudStackVolumeMetadata called with datastoreID: {} volumeInfo: {}", dataStoreId, volumeInfo); + try { + VolumeObject volumeObject = (VolumeObject) volumeInfo; + long volumeId = volumeObject.getId(); + s_logger.info("VolumeInfo ID from VolumeObject: {}", volumeId); + VolumeVO volume = volumeDao.findById(volumeId); + if (volume == null) { + throw new CloudRuntimeException("Volume not found with id: " + volumeId); + } + String volumeUuid = volumeInfo.getUuid(); + volume.setPoolType(Storage.StoragePoolType.NetworkFilesystem); + volume.setPoolId(Long.parseLong(dataStoreId)); + volume.setPath(volumeUuid); // Filename for qcow2 file + volumeDao.update(volume.getId(), volume); + return volumeUuid; + } catch (Exception e) { + s_logger.error("Exception while updating metadata for datastore: {} and volume: {}", dataStoreId, volumeInfo.getUuid(), e); + throw new CloudRuntimeException("Exception while updating volume metadata: " + e.getMessage()); + } } - private Answer createVolumeOnKVMHost(VolumeInfo volumeInfo) { + private Answer createVolumeOnKVMHost(DataObject volumeInfo) { s_logger.info("createVolumeOnKVMHost called with volumeInfo: {} ", volumeInfo); try { diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java index ae5c90c3bd9e..6c51e4630800 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java @@ -19,7 +19,7 @@ package org.apache.cloudstack.storage.service.model; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.storage.feign.model.FileInfo; import org.apache.cloudstack.storage.feign.model.Lun; @@ -28,7 +28,7 @@ public class CloudStackVolume { private FileInfo file; private Lun lun; private String datastoreId; - private VolumeInfo volumeInfo; + private DataObject volumeInfo; // DataObject is needed so it can be passed to the
agent to create volume public FileInfo getFile() { return file; } @@ -50,10 +50,10 @@ public String getDatastoreId() { public void setDatastoreId(String datastoreId) { this.datastoreId = datastoreId; } - public VolumeInfo getVolumeInfo() { + public DataObject getVolumeInfo() { return volumeInfo; } - public void setVolumeInfo(VolumeInfo volumeInfot) { - this.volumeInfo = volumeInfot; + public void setVolumeInfo(DataObject volumeInfo) { + this.volumeInfo = volumeInfo; } } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java index b82befd55eb0..eb10ef195681 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java @@ -21,7 +21,7 @@ import com.cloud.utils.StringUtils; import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.feign.model.Lun; import org.apache.cloudstack.storage.feign.model.LunSpace; @@ -56,7 +56,7 @@ public static String generateAuthHeader (String username, String password) { return BASIC + StringUtils.SPACE + new String(encodedBytes); } - public static CloudStackVolume createCloudStackVolumeRequestByProtocol(StoragePoolVO storagePool, Map details, VolumeInfo volumeObject) { + public static CloudStackVolume createCloudStackVolumeRequestByProtocol(StoragePoolVO storagePool, Map details, DataObject volumeObject) { CloudStackVolume cloudStackVolumeRequest = null; String protocol = details.get(Constants.PROTOCOL); From 74e6584a54e9de71f0435917a72cb6f0e0ed0d0f Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Mon, 8 Dec 2025 14:25:52 +0530 Subject: [PATCH 26/29] pull latest main 2 --- .../driver/OntapPrimaryDatastoreDriver.java | 24 +------------------ .../provider/StorageProviderFactory.java | 2 +- .../cloudstack/storage/utils/Constants.java | 2 -- 3 files changed, 2 insertions(+), 26 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index 100489e5c15d..5e79aa2298da 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -44,8 +44,6 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.cloudstack.storage.feign.model.OntapStorage; -import org.apache.cloudstack.storage.provider.StorageProviderFactory; import org.apache.cloudstack.storage.service.StorageStrategy; import org.apache.cloudstack.storage.service.model.CloudStackVolume; import org.apache.cloudstack.storage.service.model.ProtocolType; @@ -131,7 +129,7 @@ private String createCloudStackVolumeForTypeVolume(DataStore dataStore, VolumeIn throw new CloudRuntimeException("createCloudStackVolume : Storage Pool not found for id: " + 
dataStore.getId()); } Map details = storagePoolDetailsDao.listDetailsKeyPairs(dataStore.getId()); - StorageStrategy storageStrategy = getStrategyByStoragePoolDetails(details); + StorageStrategy storageStrategy = Utility.getStrategyByStoragePoolDetails(details); s_logger.info("createCloudStackVolumeForTypeVolume: Connection to Ontap SVM [{}] successful, preparing CloudStackVolumeRequest", details.get(Constants.SVM_NAME)); CloudStackVolume cloudStackVolumeRequest = Utility.createCloudStackVolumeRequestByProtocol(storagePool, details, volumeObject); CloudStackVolume cloudStackVolume = storageStrategy.createCloudStackVolume(cloudStackVolumeRequest); @@ -295,24 +293,4 @@ public boolean isStorageSupportHA(Storage.StoragePoolType type) { public void detachVolumeFromAllStorageNodes(Volume volume) { } - - private StorageStrategy getStrategyByStoragePoolDetails(Map details) { - if (details == null || details.isEmpty()) { - s_logger.error("getStrategyByStoragePoolDetails: Storage pool details are null or empty"); - throw new CloudRuntimeException("getStrategyByStoragePoolDetails: Storage pool details are null or empty"); - } - String protocol = details.get(Constants.PROTOCOL); - OntapStorage ontapStorage = new OntapStorage(details.get(Constants.USERNAME), details.get(Constants.PASSWORD), - details.get(Constants.MANAGEMENT_LIF), details.get(Constants.SVM_NAME), Long.parseLong(details.get(Constants.SIZE)), ProtocolType.valueOf(protocol), - Boolean.parseBoolean(details.get(Constants.IS_DISAGGREGATED))); - StorageStrategy storageStrategy = StorageProviderFactory.getStrategy(ontapStorage); - boolean isValid = storageStrategy.connect(); - if (isValid) { - s_logger.info("Connection to Ontap SVM [{}] successful", details.get(Constants.SVM_NAME)); - return storageStrategy; - } else { - s_logger.error("getStrategyByStoragePoolDetails: Connection to Ontap SVM [" + details.get(Constants.SVM_NAME) + "] failed"); - throw new CloudRuntimeException("getStrategyByStoragePoolDetails: Connection to Ontap SVM [" + details.get(Constants.SVM_NAME) + "] failed"); - } - } } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java index c67cfb76c30c..5947212efd96 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java @@ -37,7 +37,7 @@ public static StorageStrategy getStrategy(OntapStorage ontapStorage) { ProtocolType protocol = ontapStorage.getProtocol(); s_logger.info("Initializing StorageProviderFactory with protocol: " + protocol); switch (protocol) { - case NFS: + case NFS3: if (!ontapStorage.getIsDisaggregated()) { UnifiedNASStrategy unifiedNASStrategy = new UnifiedNASStrategy(ontapStorage); ComponentContext.inject(unifiedNASStrategy); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java index d8313c73d4d7..a45fb4a5b21d 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java @@ -35,8 +35,6 @@ public class Constants { public static final String VOLUME_UUID = 
"volumeUUID"; public static final String IS_DISAGGREGATED = "isDisaggregated"; public static final String RUNNING = "running"; - public static final String VOLUME_UUID = "volumeUUID"; - public static final String VOLUME_NAME = "volumeNAME"; public static final String EXPORT = "export"; public static final int ONTAP_PORT = 443; From 93e66b4f04690b2cd567def72a46e9a65853c409 Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Mon, 8 Dec 2025 14:40:32 +0530 Subject: [PATCH 27/29] lint fix --- .../storage/lifecycle/OntapPrimaryDatastoreLifecycle.java | 2 +- .../apache/cloudstack/storage/service/model/AccessGroup.java | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index f6efbd630d97..2a34284ba189 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -425,4 +425,4 @@ public void changeStoragePoolScopeToZone(DataStore store, ClusterScope clusterSc public void changeStoragePoolScopeToCluster(DataStore store, ClusterScope clusterScope, Hypervisor.HypervisorType hypervisorType) { } -} \ No newline at end of file +} diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java index 0695987c6033..9ff80e7cf8a9 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java @@ -70,5 +70,4 @@ public Scope getScope() { public void setScope(Scope scope) { this.scope = scope; } - -} \ No newline at end of file +} From d5e07281c0070c6c4868bd0e9c0ddcb64c6166e4 Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Mon, 8 Dec 2025 14:46:57 +0530 Subject: [PATCH 28/29] lint fix 2 --- .../feign/client/NetworkFeignClient.java | 18 ++++++++++ .../storage/listener/OntapHostListener.java | 34 ++++++++++--------- 2 files changed, 36 insertions(+), 16 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NetworkFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NetworkFeignClient.java index 4acbbecf6573..4dc82a68238e 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NetworkFeignClient.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NetworkFeignClient.java @@ -1,3 +1,21 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ package org.apache.cloudstack.storage.feign.client; import feign.Headers; diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java index 2a83ceabda9b..30c698995a8f 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java @@ -1,19 +1,21 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ package org.apache.cloudstack.storage.listener; From 90e6d565e7c1203d2088b7956b845c01f011449f Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Mon, 8 Dec 2025 15:26:01 +0530 Subject: [PATCH 29/29] testing fix 3 --- .../OntapPrimaryDatastoreLifecycle.java | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index 2a34284ba189..2cdd7de0b7c5 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -45,6 +45,7 @@ import org.apache.cloudstack.storage.datastore.lifecycle.BasePrimaryDataStoreLifeCycleImpl; import org.apache.cloudstack.storage.feign.model.ExportPolicy; import org.apache.cloudstack.storage.feign.model.OntapStorage; +import org.apache.cloudstack.storage.feign.model.Volume; import org.apache.cloudstack.storage.provider.StorageProviderFactory; import org.apache.cloudstack.storage.service.StorageStrategy; import org.apache.cloudstack.storage.service.model.AccessGroup; @@ -220,6 +221,21 @@ public DataStore initialize(Map dsInfos) { } s_logger.info("Using Data LIF for storage access: " + dataLIF); details.put(Constants.DATA_LIF, dataLIF); + s_logger.info("Creating ONTAP volume '" + storagePoolName + "' with size: " + volumeSize + " bytes (" + + (volumeSize / (1024 * 1024 * 1024)) + " GB)"); + try { + Volume volume = storageStrategy.createStorageVolume(storagePoolName, volumeSize); + if (volume == null) { + s_logger.error("createStorageVolume returned null for volume: " + storagePoolName); + throw new CloudRuntimeException("Failed to create ONTAP volume: " + storagePoolName); + } + s_logger.info("ONTAP volume created successfully. UUID: " + volume.getUuid() + ", Name: " + volume.getName()); + details.putIfAbsent(Constants.VOLUME_UUID, volume.getUuid()); + details.putIfAbsent(Constants.VOLUME_NAME, volume.getName()); + } catch (Exception e) { + s_logger.error("Exception occurred while creating ONTAP volume: " + storagePoolName, e); + throw new CloudRuntimeException("Failed to create ONTAP volume: " + storagePoolName + ". Error: " + e.getMessage(), e); + } } else { throw new CloudRuntimeException("ONTAP details validation failed, cannot create primary storage");