From 5f71168a17fff04500190bce185118bf4e81cafe Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Fri, 7 Nov 2025 10:40:19 +0530 Subject: [PATCH 1/9] NFS Cloudstack volume and export policy utils --- .../driver/OntapPrimaryDatastoreDriver.java | 4 +- .../storage/feign/client/NASFeignClient.java | 30 +-- .../feign/client/VolumeFeignClient.java | 12 +- .../storage/feign/model/FileInfo.java | 0 .../storage/feign/model/LunMap.java | 0 .../cloudstack/storage/feign/model/Qos.java | 0 .../OntapPrimaryDatastoreLifecycle.java | 36 +++- .../storage/provider/OntapHostListener.java | 37 ++++ .../OntapPrimaryDatastoreProvider.java | 5 +- .../provider/StorageProviderFactory.java | 2 +- .../storage/service/StorageStrategy.java | 25 ++- .../storage/service/UnifiedNASStrategy.java | 187 +++++++++++++++++- .../storage/service/model/AccessGroup.java | 29 +++ .../service/model/CloudStackVolume.java | 16 ++ .../storage/service/model/ProtocolType.java | 2 +- .../cloudstack/storage/utils/Constants.java | 2 + .../cloudstack/storage/utils/Utility.java | 112 +++++++---- 17 files changed, 430 insertions(+), 69 deletions(-) mode change 100644 => 100755 plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/FileInfo.java mode change 100644 => 100755 plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/LunMap.java mode change 100644 => 100755 plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Qos.java create mode 100644 plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index e2eb6220230a..bf027c6a1466 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -129,10 +129,12 @@ private String createCloudStackVolumeForTypeVolume(DataStore dataStore, DataObje Map details = storagePoolDetailsDao.listDetailsKeyPairs(dataStore.getId()); StorageStrategy storageStrategy = getStrategyByStoragePoolDetails(details); s_logger.info("createCloudStackVolumeForTypeVolume: Connection to Ontap SVM [{}] successful, preparing CloudStackVolumeRequest", details.get(Constants.SVM_NAME)); - CloudStackVolume cloudStackVolumeRequest = Utility.createCloudStackVolumeRequestByProtocol(storagePool, details, dataObject); + CloudStackVolume cloudStackVolumeRequest = Utility.createCloudStackVolumeRequestByProtocol(storagePool, details, (VolumeInfo) dataObject); CloudStackVolume cloudStackVolume = storageStrategy.createCloudStackVolume(cloudStackVolumeRequest); if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL)) && cloudStackVolume.getLun() != null && cloudStackVolume.getLun().getName() != null) { return cloudStackVolume.getLun().getName(); + } else if (ProtocolType.NFS.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { + return cloudStackVolume.getFile().getName(); } else { String errMsg = "createCloudStackVolumeForTypeVolume: Volume creation failed. 
Lun or Lun Path is null for dataObject: " + dataObject; s_logger.error(errMsg); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java index b7aac9954cfe..339962cad25e 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java @@ -26,39 +26,39 @@ import feign.Param; import feign.RequestLine; -//TODO: Proper URLs should be added in the RequestLine annotations below public interface NASFeignClient { // File Operations @RequestLine("GET /{volumeUuid}/files/{path}") @Headers({"Authorization: {authHeader}"}) OntapResponse getFileResponse(@Param("authHeader") String authHeader, - @Param("volumeUuid") String volumeUUID, - @Param("path") String filePath); + @Param("volumeUuid") String volumeUUID, + @Param("path") String filePath); @RequestLine("DELETE /{volumeUuid}/files/{path}") @Headers({"Authorization: {authHeader}"}) void deleteFile(@Param("authHeader") String authHeader, - @Param("volumeUuid") String volumeUUID, - @Param("path") String filePath); + @Param("volumeUuid") String volumeUUID, + @Param("path") String filePath); @RequestLine("PATCH /{volumeUuid}/files/{path}") @Headers({"Authorization: {authHeader}"}) void updateFile(@Param("authHeader") String authHeader, - @Param("volumeUuid") String volumeUUID, - @Param("path") String filePath, FileInfo fileInfo); + @Param("volumeUuid") String volumeUUID, + @Param("path") String filePath, + FileInfo fileInfo); @RequestLine("POST /{volumeUuid}/files/{path}") @Headers({"Authorization: {authHeader}"}) void createFile(@Param("authHeader") String authHeader, - @Param("volumeUuid") String volumeUUID, - @Param("path") String filePath, FileInfo file); + @Param("volumeUuid") String volumeUUID, + @Param("path") String filePath, + FileInfo file); // Export Policy Operations @RequestLine("POST /") - @Headers({"Authorization: {authHeader}", "return_records: {returnRecords}"}) + @Headers({"Authorization: {authHeader}"}) ExportPolicy createExportPolicy(@Param("authHeader") String authHeader, - @Param("returnRecords") boolean returnRecords, ExportPolicy exportPolicy); @RequestLine("GET /") @@ -68,16 +68,16 @@ ExportPolicy createExportPolicy(@Param("authHeader") String authHeader, @RequestLine("GET /{id}") @Headers({"Authorization: {authHeader}"}) OntapResponse getExportPolicyById(@Param("authHeader") String authHeader, - @Param("id") String id); + @Param("id") String id); @RequestLine("DELETE /{id}") @Headers({"Authorization: {authHeader}"}) void deleteExportPolicyById(@Param("authHeader") String authHeader, - @Param("id") String id); + @Param("id") String id); @RequestLine("PATCH /{id}") @Headers({"Authorization: {authHeader}"}) OntapResponse updateExportPolicy(@Param("authHeader") String authHeader, - @Param("id") String id, - ExportPolicy request); + @Param("id") String id, + ExportPolicy request); } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java index 9a2c76639221..cdb898ad0ae1 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java +++ 
b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java @@ -18,11 +18,15 @@ */ package org.apache.cloudstack.storage.feign.client; +import feign.QueryMap; import org.apache.cloudstack.storage.feign.model.Volume; import org.apache.cloudstack.storage.feign.model.response.JobResponse; import feign.Headers; import feign.Param; import feign.RequestLine; +import org.apache.cloudstack.storage.feign.model.response.OntapResponse; + +import java.util.Map; public interface VolumeFeignClient { @@ -38,8 +42,12 @@ public interface VolumeFeignClient { @Headers({"Authorization: {authHeader}"}) Volume getVolumeByUUID(@Param("authHeader") String authHeader, @Param("uuid") String uuid); + @RequestLine("GET /api/storage/volumes") + @Headers({"Authorization: {authHeader}"}) + OntapResponse getVolume(@Param("authHeader") String authHeader, @QueryMap Map queryMap); + @RequestLine("PATCH /api/storage/volumes/{uuid}") - @Headers({"Accept: {acceptHeader}", "Authorization: {authHeader}"}) - JobResponse updateVolumeRebalancing(@Param("acceptHeader") String acceptHeader, @Param("uuid") String uuid, Volume volumeRequest); + @Headers({ "Authorization: {authHeader}"}) + JobResponse updateVolumeRebalancing(@Param("authHeader") String authHeader, @Param("uuid") String uuid, Volume volumeRequest); } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/FileInfo.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/FileInfo.java old mode 100644 new mode 100755 diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/LunMap.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/LunMap.java old mode 100644 new mode 100755 diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Qos.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Qos.java old mode 100644 new mode 100755 diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index 01b013f606dd..e25fa7af3644 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -40,10 +40,13 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import org.apache.cloudstack.storage.datastore.lifecycle.BasePrimaryDataStoreLifeCycleImpl; import org.apache.cloudstack.storage.feign.model.OntapStorage; +import org.apache.cloudstack.storage.feign.model.Volume; import org.apache.cloudstack.storage.provider.StorageProviderFactory; import org.apache.cloudstack.storage.service.StorageStrategy; +import org.apache.cloudstack.storage.service.model.AccessGroup; import org.apache.cloudstack.storage.service.model.ProtocolType; import org.apache.cloudstack.storage.utils.Constants; +import org.apache.cloudstack.storage.utils.Utility; import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -184,7 +187,7 @@ public DataStore initialize(Map dsInfos) { String path; ProtocolType protocol = 
ProtocolType.valueOf(details.get(Constants.PROTOCOL)); switch (protocol) { - case NFS3: + case NFS: parameters.setType(Storage.StoragePoolType.NetworkFilesystem); path = details.get(Constants.MANAGEMENT_LIF) + ":/" + storagePoolName; s_logger.info("Setting NFS path for storage pool: " + path); @@ -213,7 +216,9 @@ public DataStore initialize(Map dsInfos) { long volumeSize = Long.parseLong(details.get(Constants.SIZE)); s_logger.info("Creating ONTAP volume '" + storagePoolName + "' with size: " + volumeSize + " bytes (" + (volumeSize / (1024 * 1024 * 1024)) + " GB)"); - storageStrategy.createStorageVolume(storagePoolName, volumeSize); + Volume volume = storageStrategy.createStorageVolume(storagePoolName, volumeSize); + details.put(Constants.VOLUME_UUID, volume.getUuid()); + details.put(Constants.VOLUME_NAME, volume.getName()); } else { throw new CloudRuntimeException("ONTAP details validation failed, cannot create primary storage"); } @@ -241,10 +246,20 @@ public DataStore initialize(Map dsInfos) { @Override public boolean attachCluster(DataStore dataStore, ClusterScope scope) { logger.debug("In attachCluster for ONTAP primary storage"); - PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo)dataStore; - List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primarystore); + PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)dataStore; + List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primaryStore); - logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primarystore.getClusterId())); + logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primaryStore.getClusterId())); + + Map details = primaryStore.getDetails(); // TODO check while testing , if it is populated we can remove below db call + StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details); + AccessGroup accessGroupRequest = new AccessGroup(); + accessGroupRequest.setHostsToConnect(hostsToConnect); + accessGroupRequest.setScope(scope); + accessGroupRequest.setPrimaryDataStoreInfo(primaryStore); + strategy.createAccessGroup(accessGroupRequest); + + logger.debug("attachCluster: Attaching the pool to each of the host in the cluster: {}", primaryStore.getClusterId()); for (HostVO host : hostsToConnect) { // TODO: Fetch the host IQN and add to the initiator group on ONTAP cluster try { @@ -265,9 +280,18 @@ public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo exis @Override public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.HypervisorType hypervisorType) { logger.debug("In attachZone for ONTAP primary storage"); - List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), Hypervisor.HypervisorType.KVM); + PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)dataStore; + List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), Hypervisor.HypervisorType.KVM); logger.debug(String.format("In createPool. 
Attaching the pool to each of the hosts in %s.", hostsToConnect)); + + Map details = primaryStore.getDetails(); // TODO check while testing , if it is populated we can remove below db call + StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details); + AccessGroup accessGroupRequest = new AccessGroup(); + accessGroupRequest.setHostsToConnect(hostsToConnect); + accessGroupRequest.setScope(scope); + accessGroupRequest.setPrimaryDataStoreInfo(primaryStore); + strategy.createAccessGroup(accessGroupRequest); for (HostVO host : hostsToConnect) { // TODO: Fetch the host IQN and add to the initiator group on ONTAP cluster try { diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java new file mode 100644 index 000000000000..beec2edabdff --- /dev/null +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java @@ -0,0 +1,37 @@ +package org.apache.cloudstack.storage.provider; + +import com.cloud.exception.StorageConflictException; +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; + +class OntapHostListener implements HypervisorHostListener { + + @Override + public boolean hostAdded(long hostId) { + return false; + } + + @Override + public boolean hostConnect(long hostId, long poolId) throws StorageConflictException { + return false; + } + + @Override + public boolean hostDisconnected(long hostId, long poolId) { + return false; + } + + @Override + public boolean hostAboutToBeRemoved(long hostId) { + return false; + } + + @Override + public boolean hostRemoved(long hostId, long clusterId) { + return false; + } + + @Override + public boolean hostEnabled(long hostId) { + return false; + } +} \ No newline at end of file diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java index fa2f14692c77..75d6f6310512 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java @@ -41,6 +41,7 @@ public class OntapPrimaryDatastoreProvider implements PrimaryDataStoreProvider { private static final Logger s_logger = LogManager.getLogger(OntapPrimaryDatastoreProvider.class); private OntapPrimaryDatastoreDriver primaryDatastoreDriver; private OntapPrimaryDatastoreLifecycle primaryDatastoreLifecycle; + private OntapHostListener ontapHostListener; public OntapPrimaryDatastoreProvider() { s_logger.info("OntapPrimaryDatastoreProvider initialized"); @@ -57,7 +58,7 @@ public DataStoreDriver getDataStoreDriver() { @Override public HypervisorHostListener getHostListener() { - return null; + return ontapHostListener; } @Override @@ -71,6 +72,8 @@ public boolean configure(Map params) { s_logger.trace("OntapPrimaryDatastoreProvider: configure: Called"); primaryDatastoreDriver = ComponentContext.inject(OntapPrimaryDatastoreDriver.class); primaryDatastoreLifecycle = ComponentContext.inject(OntapPrimaryDatastoreLifecycle.class); + ontapHostListener = ComponentContext.inject(OntapHostListener.class); + return true; } diff --git 
a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java index 6bb6ad1fef73..e9448ec16ded 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java @@ -36,7 +36,7 @@ public static StorageStrategy getStrategy(OntapStorage ontapStorage) { ProtocolType protocol = ontapStorage.getProtocol(); s_logger.info("Initializing StorageProviderFactory with protocol: " + protocol); switch (protocol) { - case NFS3: + case NFS: if (!ontapStorage.getIsDisaggregated()) { UnifiedNASStrategy unifiedNASStrategy = new UnifiedNASStrategy(ontapStorage); unifiedNASStrategy.setOntapStorage(ontapStorage); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java index 0f9706335784..37310a1467c2 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java @@ -156,8 +156,11 @@ public Volume createStorageVolume(String volumeName, Long size) { volumeRequest.setSize(size); // Make the POST API call to create the volume try { - // Create URI for POST CreateVolume API - // Call the VolumeFeignClient to create the volume + /* + ONTAP creates a default 0.0.0.0 export rule if no export rules are defined while creating the volume, + and since CloudStack is not aware of the hosts at storage pool creation time, we can create either a default or a + permissive rule here and update it later as part of the attachCluster or attachZone implementation + */ JobResponse jobResponse = volumeFeignClient.createVolumeWithJob(authHeader, volumeRequest); if (jobResponse == null || jobResponse.getJob() == null) { throw new CloudRuntimeException("Failed to initiate volume creation for " + volumeName); @@ -192,8 +195,20 @@ public Volume createStorageVolume(String volumeName, Long size) { throw new CloudRuntimeException("Failed to create volume: " + e.getMessage()); } s_logger.info("Volume created successfully: " + volumeName); - //TODO - return null; + // Fetch the created volume once so its UUID can be stored in the storage pool details and reused by later workflows, saving an extra get-volume call + OntapResponse ontapVolume = new OntapResponse<>(); + try { + Map queryParams = Map.of(Constants.NAME, volumeName); + ontapVolume = volumeFeignClient.getVolume(authHeader, queryParams); + if ((ontapVolume == null || ontapVolume.getRecords().isEmpty())) { + s_logger.error("Exception while getting volume: volume not found"); + throw new CloudRuntimeException("Failed to fetch volume " + volumeName); + } + } catch (Exception e) { + s_logger.error("Exception while getting volume: " + e.getMessage()); + throw new CloudRuntimeException("Failed to fetch volume: " + e.getMessage()); + } + return ontapVolume.getRecords().get(0); } /** @@ -287,7 +302,7 @@ public Volume getStorageVolume(Volume volume) * @param accessGroup the access group to create * @return the created AccessGroup object */ - abstract AccessGroup createAccessGroup(AccessGroup accessGroup); + abstract public AccessGroup createAccessGroup(AccessGroup accessGroup); /** * Method encapsulates 
the behavior based on the opted protocol in subclasses diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java index cb3079691c94..cd6631c119bb 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java @@ -19,23 +19,32 @@ package org.apache.cloudstack.storage.service; +import com.cloud.utils.exception.CloudRuntimeException; +import feign.FeignException; import org.apache.cloudstack.storage.feign.FeignClientFactory; import org.apache.cloudstack.storage.feign.client.NASFeignClient; +import org.apache.cloudstack.storage.feign.client.VolumeFeignClient; +import org.apache.cloudstack.storage.feign.model.ExportPolicy; +import org.apache.cloudstack.storage.feign.model.FileInfo; +import org.apache.cloudstack.storage.feign.model.Nas; import org.apache.cloudstack.storage.feign.model.OntapStorage; +import org.apache.cloudstack.storage.feign.model.Svm; +import org.apache.cloudstack.storage.feign.model.Volume; +import org.apache.cloudstack.storage.feign.model.response.OntapResponse; import org.apache.cloudstack.storage.service.model.AccessGroup; import org.apache.cloudstack.storage.service.model.CloudStackVolume; import org.apache.cloudstack.storage.utils.Constants; +import org.apache.cloudstack.storage.utils.Utility; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; - import java.util.Map; public class UnifiedNASStrategy extends NASStrategy { private static final Logger s_logger = LogManager.getLogger(UnifiedNASStrategy.class); - // Add missing Feign client setup for NAS operations private final FeignClientFactory feignClientFactory; private final NASFeignClient nasFeignClient; + private final VolumeFeignClient volumeFeignClient; public UnifiedNASStrategy(OntapStorage ontapStorage) { super(ontapStorage); @@ -43,6 +52,7 @@ public UnifiedNASStrategy(OntapStorage ontapStorage) { // Initialize FeignClientFactory and create NAS client this.feignClientFactory = new FeignClientFactory(); this.nasFeignClient = feignClientFactory.createClient(NASFeignClient.class, baseURL); + this.volumeFeignClient = feignClientFactory.createClient(VolumeFeignClient.class, baseURL); } public void setOntapStorage(OntapStorage ontapStorage) { @@ -51,8 +61,17 @@ public void setOntapStorage(OntapStorage ontapStorage) { @Override public CloudStackVolume createCloudStackVolume(CloudStackVolume cloudstackVolume) { - //TODO: Implement NAS volume creation using nasFeignClient - return null; + s_logger.info("createCloudStackVolume: Create cloudstack volume " + cloudstackVolume); + try { + createFile(cloudstackVolume.getVolume().getUuid(),cloudstackVolume.getCloudstackVolName(), cloudstackVolume.getFile()); + s_logger.debug("Successfully created file in ONTAP under volume with path {} or name {} ", cloudstackVolume.getVolume().getUuid(), cloudstackVolume.getCloudstackVolName()); + FileInfo responseFile = cloudstackVolume.getFile(); + responseFile.setPath(cloudstackVolume.getCloudstackVolName()); + }catch (Exception e) { + s_logger.error("Exception occurred while creating file or dir: {}. 
Exception: {}", cloudstackVolume.getCloudstackVolName(), e.getMessage()); + throw new CloudRuntimeException("Failed to create file: " + e.getMessage()); + } + return cloudstackVolume; } @Override @@ -104,4 +123,164 @@ void enableLogicalAccess(Map values) { void disableLogicalAccess(Map values) { //TODO } + + + private ExportPolicy createExportPolicy(String svmName, String policyName) { + s_logger.info("Creating export policy: {} for SVM: {}", policyName, svmName); + + try { + String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); + + // Create ExportPolicy object + ExportPolicy exportPolicy = new ExportPolicy(); + exportPolicy.setName(policyName); + + // Set SVM + Svm svm = new Svm(); + svm.setName(svmName); + exportPolicy.setSvm(svm); + + // Create export policy + ExportPolicy createdPolicy = nasFeignClient.createExportPolicy(authHeader, exportPolicy); + + if (createdPolicy != null && createdPolicy.getId() != null) { + s_logger.info("Export policy created successfully with ID: {}", createdPolicy.getId()); + return createdPolicy; + } else { + throw new CloudRuntimeException("Failed to create export policy: " + policyName); + } + + } catch (FeignException e) { + s_logger.error("Failed to create export policy: {}", policyName, e); + throw new CloudRuntimeException("Failed to create export policy: " + e.getMessage()); + } catch (Exception e) { + s_logger.error("Exception while creating export policy: {}", policyName, e); + throw new CloudRuntimeException("Failed to create export policy: " + e.getMessage()); + } + } + + + private void deleteExportPolicy(String svmName, String policyName) { + try { + String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); + OntapResponse policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader); + + if (policiesResponse.getRecords() == null || policiesResponse.getRecords().isEmpty()) { + s_logger.warn("Export policy not found for deletion: {}", policyName); + throw new CloudRuntimeException("Export policy not found : " + policyName); + } + String policyId = policiesResponse.getRecords().get(0).getId().toString(); + nasFeignClient.deleteExportPolicyById(authHeader, policyId); + s_logger.info("Export policy deleted successfully: {}", policyName); + } catch (Exception e) { + s_logger.error("Failed to delete export policy: {}", policyName, e); + throw new CloudRuntimeException("Failed to delete export policy: " + policyName); + } + } + + + private String addExportRule(String policyName, String clientMatch, String[] protocols, String[] roRule, String[] rwRule) { + return ""; + } + + private String assignExportPolicyToVolume(String volumeUuid, String policyName) { + s_logger.info("Assigning export policy: {} to volume: {}", policyName, volumeUuid); + + try { + String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); + OntapResponse policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader); + if (policiesResponse.getRecords() == null || policiesResponse.getRecords().isEmpty()) { + throw new CloudRuntimeException("Export policy not found: " + policyName); + } + ExportPolicy exportPolicy = policiesResponse.getRecords().get(0); + // Create Volume update object with NAS configuration + Volume volumeUpdate = new Volume(); + Nas nas = new Nas(); + nas.setExportPolicy(exportPolicy); + volumeUpdate.setNas(nas); + + volumeFeignClient.updateVolumeRebalancing(authHeader, volumeUuid, volumeUpdate); + s_logger.info("Export policy successfully 
assigned to volume: {}", volumeUuid); + return "Export policy " + policyName + " assigned to volume " + volumeUuid; + + } catch (FeignException e) { + s_logger.error("Failed to assign export policy to volume: {}", volumeUuid, e); + throw new CloudRuntimeException("Failed to assign export policy: " + e.getMessage()); + } catch (Exception e) { + s_logger.error("Exception while assigning export policy to volume: {}", volumeUuid, e); + throw new CloudRuntimeException("Failed to assign export policy: " + e.getMessage()); + } + } + + private boolean createFile(String volumeUuid, String filePath, FileInfo fileInfo) { + s_logger.info("Creating file: {} in volume: {}", filePath, volumeUuid); + + try { + String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); + nasFeignClient.createFile(authHeader, volumeUuid, filePath, fileInfo); + s_logger.info("File created successfully: {} in volume: {}", filePath, volumeUuid); + return true; + } catch (FeignException e) { + s_logger.error("Failed to create file: {} in volume: {}", filePath, volumeUuid, e); + return false; + } catch (Exception e) { + s_logger.error("Exception while creating file: {} in volume: {}", filePath, volumeUuid, e); + return false; + } + } + + private boolean deleteFile(String volumeUuid, String filePath) { + s_logger.info("Deleting file: {} from volume: {}", filePath, volumeUuid); + + try { + String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); + nasFeignClient.deleteFile(authHeader, volumeUuid, filePath); + s_logger.info("File deleted successfully: {} from volume: {}", filePath, volumeUuid); + return true; + } catch (FeignException e) { + s_logger.error("Failed to delete file: {} from volume: {}", filePath, volumeUuid, e); + return false; + } catch (Exception e) { + s_logger.error("Exception while deleting file: {} from volume: {}", filePath, volumeUuid, e); + return false; + } + } + + private OntapResponse getFileInfo(String volumeUuid, String filePath) { + s_logger.debug("Getting file info for: {} in volume: {}", filePath, volumeUuid); + + try { + String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); + OntapResponse response = nasFeignClient.getFileResponse(authHeader, volumeUuid, filePath); + s_logger.debug("Retrieved file info for: {} in volume: {}", filePath, volumeUuid); + return response; + } catch (FeignException e) { + if (e.status() == 404) { + s_logger.debug("File not found: {} in volume: {}", filePath, volumeUuid); + return null; + } + s_logger.error("Failed to get file info: {} in volume: {}", filePath, volumeUuid, e); + throw new CloudRuntimeException("Failed to get file info: " + e.getMessage()); + } catch (Exception e) { + s_logger.error("Exception while getting file info: {} in volume: {}", filePath, volumeUuid, e); + throw new CloudRuntimeException("Failed to get file info: " + e.getMessage()); + } + } + + private boolean updateFile(String volumeUuid, String filePath, FileInfo fileInfo) { + s_logger.info("Updating file: {} in volume: {}", filePath, volumeUuid); + + try { + String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); + nasFeignClient.updateFile( authHeader, volumeUuid, filePath, fileInfo); + s_logger.info("File updated successfully: {} in volume: {}", filePath, volumeUuid); + return true; + } catch (FeignException e) { + s_logger.error("Failed to update file: {} in volume: {}", filePath, volumeUuid, e); + return false; + } catch (Exception e) { + 
s_logger.error("Exception while updating file: {} in volume: {}", filePath, volumeUuid, e); + return false; + } + } } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java index c4dfce7ce51c..a3467cc30d2d 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java @@ -19,14 +19,24 @@ package org.apache.cloudstack.storage.service.model; +import com.cloud.host.HostVO; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.storage.feign.model.ExportPolicy; import org.apache.cloudstack.storage.feign.model.Igroup; +import java.util.List; + public class AccessGroup { private Igroup igroup; private ExportPolicy exportPolicy; + private List hostsToConnect; + private PrimaryDataStoreInfo primaryDataStoreInfo; + private Scope scope; + + public Igroup getIgroup() { return igroup; } @@ -42,4 +52,23 @@ public ExportPolicy getPolicy() { public void setPolicy(ExportPolicy policy) { this.exportPolicy = policy; } + public List getHostsToConnect() { + return hostsToConnect; + } + public void setHostsToConnect(List hostsToConnect) { + this.hostsToConnect = hostsToConnect; + } + public PrimaryDataStoreInfo getPrimaryDataStoreInfo() { + return primaryDataStoreInfo; + } + public void setPrimaryDataStoreInfo(PrimaryDataStoreInfo primaryDataStoreInfo) { + this.primaryDataStoreInfo = primaryDataStoreInfo; + } + public Scope getScope() { + return scope; + } + public void setScope(Scope scope) { + this.scope = scope; + } + } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java index a7f5d8659d03..694c4a2c126f 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java @@ -21,11 +21,15 @@ import org.apache.cloudstack.storage.feign.model.FileInfo; import org.apache.cloudstack.storage.feign.model.Lun; +import org.apache.cloudstack.storage.feign.model.Volume; public class CloudStackVolume { private FileInfo file; private Lun lun; + private Volume volume; + // will be replaced after testing + private String cloudstackVolName; public FileInfo getFile() { return file; @@ -42,4 +46,16 @@ public Lun getLun() { public void setLun(Lun lun) { this.lun = lun; } + public Volume getVolume() { + return volume; + } + public void setVolume(Volume volume) { + this.volume = volume; + } + public String getCloudstackVolName() { + return cloudstackVolName; + } + public void setCloudstackVolName(String cloudstackVolName) { + this.cloudstackVolName = cloudstackVolName; + } } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/ProtocolType.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/ProtocolType.java index 00dca62480dc..47b55ec29bb7 100644 --- 
a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/ProtocolType.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/ProtocolType.java @@ -20,6 +20,6 @@ package org.apache.cloudstack.storage.service.model; public enum ProtocolType { - NFS3, + NFS, ISCSI } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java index b58e8484cd48..a81fdb0a8ab5 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java @@ -31,6 +31,8 @@ public class Constants { public static final String MANAGEMENT_LIF = "managementLIF"; public static final String IS_DISAGGREGATED = "isDisaggregated"; public static final String RUNNING = "running"; + public static final String VOLUME_UUID = "volumeUUID"; + public static final String VOLUME_NAME = "volumeNAME"; public static final int ONTAP_PORT = 443; diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java index af48724f984c..cd02cbf10481 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java @@ -21,11 +21,16 @@ import com.cloud.utils.StringUtils; import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.feign.model.FileInfo; import org.apache.cloudstack.storage.feign.model.Lun; import org.apache.cloudstack.storage.feign.model.LunSpace; +import org.apache.cloudstack.storage.feign.model.OntapStorage; import org.apache.cloudstack.storage.feign.model.Svm; +import org.apache.cloudstack.storage.feign.model.Volume; +import org.apache.cloudstack.storage.provider.StorageProviderFactory; +import org.apache.cloudstack.storage.service.StorageStrategy; import org.apache.cloudstack.storage.service.model.CloudStackVolume; import org.apache.cloudstack.storage.service.model.ProtocolType; import org.apache.logging.log4j.LogManager; @@ -53,41 +58,82 @@ public static String generateAuthHeader (String username, String password) { return BASIC + StringUtils.SPACE + new String(encodedBytes); } - public static CloudStackVolume createCloudStackVolumeRequestByProtocol(StoragePoolVO storagePool, Map details, DataObject dataObject) { - CloudStackVolume cloudStackVolumeRequest = null; + public static CloudStackVolume createCloudStackVolumeRequestByProtocol(StoragePoolVO storagePool, Map details, VolumeInfo volumeObject) { + CloudStackVolume cloudStackVolumeRequest = null; - String protocol = details.get(Constants.PROTOCOL); - if (ProtocolType.ISCSI.name().equalsIgnoreCase(protocol)) { - cloudStackVolumeRequest = new CloudStackVolume(); - Lun lunRequest = new Lun(); - Svm svm = new Svm(); - svm.setName(details.get(Constants.SVM_NAME)); - lunRequest.setSvm(svm); + String protocol = details.get(Constants.PROTOCOL); + ProtocolType protocolType = ProtocolType.valueOf(protocol); + switch (protocolType) { + case 
NFS: + // TODO add logic for NFS file creation + cloudStackVolumeRequest = new CloudStackVolume(); + FileInfo file = new FileInfo(); + //file.setName("test1"); // to be replaced with volume name // this should not be passed for dir + //file.setName(volumeObject.getName()); // to check whether this needs to be sent or not + file.setSize(Long.parseLong("10000")); + file.setSize(volumeObject.getSize()); + file.setUnixPermissions(755); // check if it is needed only for dir ? it is needed for dir + file.setType(FileInfo.TypeEnum.DIRECTORY); // We are creating file for a cloudstack volume . Should it be dir ? // TODO change once multipart is done - LunSpace lunSpace = new LunSpace(); - lunSpace.setSize(dataObject.getSize()); - lunRequest.setSpace(lunSpace); - //Lun name is full path like in unified "/vol/VolumeName/LunName" - String lunFullName = Constants.VOLUME_PATH_PREFIX + storagePool.getName() + Constants.PATH_SEPARATOR + dataObject.getName(); - lunRequest.setName(lunFullName); + Volume poolVolume = new Volume(); + poolVolume.setName(details.get(Constants.VOLUME_NAME)); + poolVolume.setUuid(details.get(Constants.VOLUME_UUID)); + cloudStackVolumeRequest.setVolume(poolVolume); + cloudStackVolumeRequest.setFile(file); + cloudStackVolumeRequest.setCloudstackVolName(volumeObject.getName()); + break; + case ISCSI: + cloudStackVolumeRequest = new CloudStackVolume(); + Lun lunRequest = new Lun(); + Svm svm = new Svm(); + svm.setName(details.get(Constants.SVM_NAME)); + lunRequest.setSvm(svm); - String hypervisorType = storagePool.getHypervisor().name(); - String osType = null; - switch (hypervisorType) { - case Constants.KVM: - osType = Lun.OsTypeEnum.LINUX.getValue(); - break; - default: - String errMsg = "createCloudStackVolume : Unsupported hypervisor type " + hypervisorType + " for ONTAP storage"; - s_logger.error(errMsg); - throw new CloudRuntimeException(errMsg); - } - lunRequest.setOsType(Lun.OsTypeEnum.valueOf(osType)); + LunSpace lunSpace = new LunSpace(); + lunSpace.setSize(volumeObject.getSize()); + lunRequest.setSpace(lunSpace); + //Lun name is full path like in unified "/vol/VolumeName/LunName" + String lunFullName = Constants.VOLUME_PATH_PREFIX + storagePool.getName() + Constants.PATH_SEPARATOR + volumeObject.getName(); + lunRequest.setName(lunFullName); - cloudStackVolumeRequest.setLun(lunRequest); - return cloudStackVolumeRequest; - } else { - throw new CloudRuntimeException("createCloudStackVolumeRequestByProtocol: Unsupported protocol " + protocol); - } + String hypervisorType = storagePool.getHypervisor().name(); + String osType = null; + switch (hypervisorType) { + case Constants.KVM: + osType = Lun.OsTypeEnum.LINUX.getValue(); + break; + default: + String errMsg = "createCloudStackVolume : Unsupported hypervisor type " + hypervisorType + " for ONTAP storage"; + s_logger.error(errMsg); + throw new CloudRuntimeException(errMsg); + } + lunRequest.setOsType(Lun.OsTypeEnum.valueOf(osType)); + cloudStackVolumeRequest.setLun(lunRequest); + break; + default: + throw new CloudRuntimeException("createCloudStackVolumeRequestByProtocol: Unsupported protocol " + protocol); + + } + return cloudStackVolumeRequest; + } + + public static StorageStrategy getStrategyByStoragePoolDetails(Map details) { + if (details == null || details.isEmpty()) { + s_logger.error("getStrategyByStoragePoolDetails: Storage pool details are null or empty"); + throw new CloudRuntimeException("getStrategyByStoragePoolDetails: Storage pool details are null or empty"); + } + String protocol = 
details.get(Constants.PROTOCOL); + OntapStorage ontapStorage = new OntapStorage(details.get(Constants.USERNAME), details.get(Constants.PASSWORD), + details.get(Constants.MANAGEMENT_LIF), details.get(Constants.SVM_NAME), ProtocolType.valueOf(protocol), + Boolean.parseBoolean(details.get(Constants.IS_DISAGGREGATED))); + StorageStrategy storageStrategy = StorageProviderFactory.getStrategy(ontapStorage); + boolean isValid = storageStrategy.connect(); + if (isValid) { + s_logger.info("Connection to Ontap SVM [{}] successful", details.get(Constants.SVM_NAME)); + return storageStrategy; + } else { + s_logger.error("getStrategyByStoragePoolDetails: Connection to Ontap SVM [" + details.get(Constants.SVM_NAME) + "] failed"); + throw new CloudRuntimeException("getStrategyByStoragePoolDetails: Connection to Ontap SVM [" + details.get(Constants.SVM_NAME) + "] failed"); + } } } From c88483ae6ef217ee78a6e8a51892cd1b25d21516 Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Fri, 7 Nov 2025 11:38:56 +0530 Subject: [PATCH 2/9] Licencse add in files --- .../storage/feign/FeignConfiguration.java | 19 +++++++++++++++++++ .../storage/provider/OntapHostListener.java | 19 +++++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java index ce2783add228..fc4d3484506b 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java @@ -1,3 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + package org.apache.cloudstack.storage.feign; import feign.RequestInterceptor; diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java index beec2edabdff..2f1a81da468a 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java @@ -1,3 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + package org.apache.cloudstack.storage.provider; import com.cloud.exception.StorageConflictException; From e3c44ae554257e929d5aa3ff658a8e638dc075d9 Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Mon, 10 Nov 2025 15:18:34 +0530 Subject: [PATCH 3/9] accessgroup create recode --- plugins/storage/volume/ontap/pom.xml | 3 +- .../driver/OntapPrimaryDatastoreDriver.java | 1 - .../storage/feign/FeignConfiguration.java | 41 ++++++++-- .../OntapPrimaryDatastoreLifecycle.java | 31 +++++++- .../storage/provider/OntapHostListener.java | 74 ++++++++++--------- .../OntapPrimaryDatastoreProvider.java | 6 +- .../storage/service/StorageStrategy.java | 36 ++++++--- .../storage/service/UnifiedNASStrategy.java | 63 ++++++++++++---- 8 files changed, 177 insertions(+), 78 deletions(-) mode change 100644 => 100755 plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java mode change 100644 => 100755 plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java diff --git a/plugins/storage/volume/ontap/pom.xml b/plugins/storage/volume/ontap/pom.xml index 10ca7935f408..3da605f12f9d 100644 --- a/plugins/storage/volume/ontap/pom.xml +++ b/plugins/storage/volume/ontap/pom.xml @@ -31,7 +31,6 @@ 2021.0.7 11.0 20230227 - 2.15.2 4.5.14 1.6.2 3.8.1 @@ -77,7 +76,7 @@ com.fasterxml.jackson.core jackson-databind - ${jackson-databind.version} + 2.13.4 org.apache.httpcomponents diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index bf027c6a1466..979e761eb4b0 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -179,7 +179,6 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore @Override public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) { - } @Override diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java index fc4d3484506b..e9c504e8de71 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java @@ -30,7 +30,7 @@ import feign.codec.EncodeException; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.DeserializationFeature; -import com.fasterxml.jackson.databind.json.JsonMapper; +import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.http.conn.ConnectionKeepAliveStrategy; import org.apache.http.conn.ssl.NoopHostnameVerifier; import 
org.apache.http.conn.ssl.SSLConnectionSocketFactory; @@ -55,13 +55,11 @@ public class FeignConfiguration { private final int retryMaxInterval = 5; private final String ontapFeignMaxConnection = "80"; private final String ontapFeignMaxConnectionPerRoute = "20"; - private final JsonMapper jsonMapper; + private final ObjectMapper jsonMapper; public FeignConfiguration() { - this.jsonMapper = JsonMapper.builder() - .disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES) - .findAndAddModules() - .build(); + this.jsonMapper = new ObjectMapper(); + this.jsonMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); } public Client createClient() { @@ -139,16 +137,43 @@ public Decoder createDecoder() { @Override public Object decode(Response response, Type type) throws IOException, DecodeException { if (response.body() == null) { + logger.debug("Response body is null, returning null"); return null; } String json = null; try (InputStream bodyStream = response.body().asInputStream()) { json = new String(bodyStream.readAllBytes(), StandardCharsets.UTF_8); logger.debug("Decoding JSON response: {}", json); - return jsonMapper.readValue(json, jsonMapper.getTypeFactory().constructType(type)); + logger.debug("Target type: {}", type); + logger.debug("About to call jsonMapper.readValue()..."); + + Object result = null; + try { + logger.debug("Calling jsonMapper.constructType()..."); + var javaType = jsonMapper.getTypeFactory().constructType(type); + logger.debug("constructType() returned: {}", javaType); + + logger.debug("Calling jsonMapper.readValue() with json and javaType..."); + result = jsonMapper.readValue(json, javaType); + logger.debug("jsonMapper.readValue() completed successfully"); + } catch (Throwable ex) { + logger.error("EXCEPTION in jsonMapper.readValue()! Type: {}, Message: {}", ex.getClass().getName(), ex.getMessage(), ex); + throw ex; + } + + if (result == null) { + logger.warn("Decoded result is null!"); + } else { + logger.debug("Successfully decoded to object of type: {}", result.getClass().getName()); + } + logger.debug("Returning result from decoder"); + return result; } catch (IOException e) { - logger.error("Error decoding JSON response. Status: {}, Raw body: {}", response.status(), json, e); + logger.error("IOException during decoding. Status: {}, Raw body: {}", response.status(), json, e); throw new DecodeException(response.status(), "Error decoding JSON response", response.request(), e); + } catch (Exception e) { + logger.error("Unexpected error during decoding. 
Status: {}, Type: {}, Raw body: {}", response.status(), type, json, e); + throw new DecodeException(response.status(), "Unexpected error during decoding", response.request(), e); } } }; diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java old mode 100644 new mode 100755 index e25fa7af3644..784f7ab6ed02 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -38,8 +38,11 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.lifecycle.BasePrimaryDataStoreLifeCycleImpl; +import org.apache.cloudstack.storage.feign.model.ExportPolicy; import org.apache.cloudstack.storage.feign.model.OntapStorage; +import org.apache.cloudstack.storage.feign.model.Svm; import org.apache.cloudstack.storage.feign.model.Volume; import org.apache.cloudstack.storage.provider.StorageProviderFactory; import org.apache.cloudstack.storage.service.StorageStrategy; @@ -62,6 +65,7 @@ public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycl @Inject private StorageManager _storageMgr; @Inject private ResourceManager _resourceMgr; @Inject private PrimaryDataStoreHelper _dataStoreHelper; + @Inject private StoragePoolDetailsDao storagePoolDetailsDao; private static final Logger s_logger = LogManager.getLogger(OntapPrimaryDatastoreLifecycle.class); // ONTAP minimum volume size is 1.56 GB (1677721600 bytes) @@ -216,9 +220,21 @@ public DataStore initialize(Map dsInfos) { long volumeSize = Long.parseLong(details.get(Constants.SIZE)); s_logger.info("Creating ONTAP volume '" + storagePoolName + "' with size: " + volumeSize + " bytes (" + (volumeSize / (1024 * 1024 * 1024)) + " GB)"); - Volume volume = storageStrategy.createStorageVolume(storagePoolName, volumeSize); - details.put(Constants.VOLUME_UUID, volume.getUuid()); - details.put(Constants.VOLUME_NAME, volume.getName()); + try { + Volume volume = storageStrategy.createStorageVolume(storagePoolName, volumeSize); + if (volume == null) { + s_logger.error("createStorageVolume returned null for volume: " + storagePoolName); + throw new CloudRuntimeException("Failed to create ONTAP volume: " + storagePoolName); + } + + s_logger.info("Volume object retrieved successfully. UUID: " + volume.getUuid() + ", Name: " + volume.getName()); + + details.putIfAbsent(Constants.VOLUME_UUID, volume.getUuid()); + details.putIfAbsent(Constants.VOLUME_NAME, volume.getName()); + } catch (Exception e) { + s_logger.error("Exception occurred while creating ONTAP volume: " + storagePoolName, e); + throw new CloudRuntimeException("Failed to create ONTAP volume: " + storagePoolName + ". 
Error: " + e.getMessage(), e); + } } else { throw new CloudRuntimeException("ONTAP details validation failed, cannot create primary storage"); } @@ -249,14 +265,21 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)dataStore; List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primaryStore); + logger.debug(" datastore object received is {} ",primaryStore ); + logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primaryStore.getClusterId())); - Map details = primaryStore.getDetails(); // TODO check while testing , if it is populated we can remove below db call + Map details = storagePoolDetailsDao.listDetailsKeyPairs(primaryStore.getId()); StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details); + Svm svm = new Svm(); + svm.setName(details.get(Constants.SVM_NAME)); + ExportPolicy exportPolicy = new ExportPolicy(); + exportPolicy.setSvm(svm); AccessGroup accessGroupRequest = new AccessGroup(); accessGroupRequest.setHostsToConnect(hostsToConnect); accessGroupRequest.setScope(scope); accessGroupRequest.setPrimaryDataStoreInfo(primaryStore); + accessGroupRequest.setPolicy(exportPolicy); strategy.createAccessGroup(accessGroupRequest); logger.debug("attachCluster: Attaching the pool to each of the host in the cluster: {}", primaryStore.getClusterId()); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java old mode 100644 new mode 100755 index 2f1a81da468a..3fb6406aa9cc --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java @@ -18,39 +18,41 @@ */ package org.apache.cloudstack.storage.provider; - -import com.cloud.exception.StorageConflictException; -import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; - -class OntapHostListener implements HypervisorHostListener { - - @Override - public boolean hostAdded(long hostId) { - return false; - } - - @Override - public boolean hostConnect(long hostId, long poolId) throws StorageConflictException { - return false; - } - - @Override - public boolean hostDisconnected(long hostId, long poolId) { - return false; - } - - @Override - public boolean hostAboutToBeRemoved(long hostId) { - return false; - } - - @Override - public boolean hostRemoved(long hostId, long clusterId) { - return false; - } - - @Override - public boolean hostEnabled(long hostId) { - return false; - } -} \ No newline at end of file +// +//import com.cloud.exception.StorageConflictException; +//import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +// +//public class OntapHostListener implements HypervisorHostListener { +// +// public OntapHostListener(){} +// +// @Override +// public boolean hostAdded(long hostId) { +// return false; +// } +// +// @Override +// public boolean hostConnect(long hostId, long poolId) throws StorageConflictException { +// return false; +// } +// +// @Override +// public boolean hostDisconnected(long hostId, long poolId) { +// return false; +// } +// +// @Override +// public boolean hostAboutToBeRemoved(long hostId) { +// return false; +// } +// +// @Override +// public boolean 
hostRemoved(long hostId, long clusterId) { +// return false; +// } +// +// @Override +// public boolean hostEnabled(long hostId) { +// return false; +// } +//} \ No newline at end of file diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java index 75d6f6310512..d954d1d413cf 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java @@ -41,7 +41,7 @@ public class OntapPrimaryDatastoreProvider implements PrimaryDataStoreProvider { private static final Logger s_logger = LogManager.getLogger(OntapPrimaryDatastoreProvider.class); private OntapPrimaryDatastoreDriver primaryDatastoreDriver; private OntapPrimaryDatastoreLifecycle primaryDatastoreLifecycle; - private OntapHostListener ontapHostListener; + // private HypervisorHostListener listener; public OntapPrimaryDatastoreProvider() { s_logger.info("OntapPrimaryDatastoreProvider initialized"); @@ -58,7 +58,7 @@ public DataStoreDriver getDataStoreDriver() { @Override public HypervisorHostListener getHostListener() { - return ontapHostListener; + return null; } @Override @@ -72,7 +72,7 @@ public boolean configure(Map params) { s_logger.trace("OntapPrimaryDatastoreProvider: configure: Called"); primaryDatastoreDriver = ComponentContext.inject(OntapPrimaryDatastoreDriver.class); primaryDatastoreLifecycle = ComponentContext.inject(OntapPrimaryDatastoreLifecycle.class); - ontapHostListener = ComponentContext.inject(OntapHostListener.class); + // listener = ComponentContext.inject(OntapHostListener.class); return true; } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java index 37310a1467c2..045cf23af67f 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java @@ -196,19 +196,37 @@ public Volume createStorageVolume(String volumeName, Long size) { } s_logger.info("Volume created successfully: " + volumeName); // Below code is to update volume uuid to storage pool mapping once and used for all other workflow saving get volume call - OntapResponse ontapVolume = new OntapResponse<>(); try { Map queryParams = Map.of(Constants.NAME, volumeName); - ontapVolume = volumeFeignClient.getVolume(authHeader, queryParams); - if ((ontapVolume == null || ontapVolume.getRecords().isEmpty())) { - s_logger.error("Exception while getting volume volume not found:"); - throw new CloudRuntimeException("Failed to fetch volume " + volumeName); + s_logger.debug("Fetching volume details for: " + volumeName); + + OntapResponse ontapVolume = volumeFeignClient.getVolume(authHeader, queryParams); + s_logger.debug("Feign call completed. Processing response..."); + + if (ontapVolume == null) { + s_logger.error("OntapResponse is null for volume: " + volumeName); + throw new CloudRuntimeException("Failed to fetch volume " + volumeName + ": Response is null"); + } + s_logger.debug("OntapResponse is not null. 
Checking records field..."); + + if (ontapVolume.getRecords() == null) { + s_logger.error("OntapResponse.records is null for volume: " + volumeName); + throw new CloudRuntimeException("Failed to fetch volume " + volumeName + ": Records list is null"); } - }catch (Exception e) { - s_logger.error("Exception while getting volume: " + e.getMessage()); - throw new CloudRuntimeException("Failed to fetch volume: " + e.getMessage()); + s_logger.debug("Records field is not null. Size: " + ontapVolume.getRecords().size()); + + if (ontapVolume.getRecords().isEmpty()) { + s_logger.error("OntapResponse.records is empty for volume: " + volumeName); + throw new CloudRuntimeException("Failed to fetch volume " + volumeName + ": No records found"); + } + + Volume volume = ontapVolume.getRecords().get(0); + s_logger.info("Volume retrieved successfully: " + volumeName + ", UUID: " + volume.getUuid()); + return volume; + } catch (Exception e) { + s_logger.error("Exception while retrieving volume details for: " + volumeName, e); + throw new CloudRuntimeException("Failed to fetch volume: " + volumeName + ". Error: " + e.getMessage(), e); } - return ontapVolume.getRecords().get(0); } /** diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java index cd6631c119bb..2df144193035 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java @@ -19,12 +19,14 @@ package org.apache.cloudstack.storage.service; +import com.cloud.host.HostVO; import com.cloud.utils.exception.CloudRuntimeException; import feign.FeignException; import org.apache.cloudstack.storage.feign.FeignClientFactory; import org.apache.cloudstack.storage.feign.client.NASFeignClient; import org.apache.cloudstack.storage.feign.client.VolumeFeignClient; import org.apache.cloudstack.storage.feign.model.ExportPolicy; +import org.apache.cloudstack.storage.feign.model.ExportRule; import org.apache.cloudstack.storage.feign.model.FileInfo; import org.apache.cloudstack.storage.feign.model.Nas; import org.apache.cloudstack.storage.feign.model.OntapStorage; @@ -37,6 +39,9 @@ import org.apache.cloudstack.storage.utils.Utility; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; + +import java.util.ArrayList; +import java.util.List; import java.util.Map; public class UnifiedNASStrategy extends NASStrategy { @@ -93,7 +98,35 @@ CloudStackVolume getCloudStackVolume(CloudStackVolume cloudstackVolume) { @Override public AccessGroup createAccessGroup(AccessGroup accessGroup) { - //TODO + + + // Create the export policy + String svmName = accessGroup.getPolicy().getSvm().getName(); + String exportPolicyName = "export-" + svmName + "-" + accessGroup.getPrimaryDataStoreInfo().getName(); + + ExportPolicy exportPolicy = new ExportPolicy(); + exportPolicy.setName(exportPolicyName); + + Svm svm = new Svm(); + svm.setName(svmName); + exportPolicy.setSvm(svm); + + List rules = new ArrayList<>(); + ExportRule exportRule = new ExportRule(); + + List hosts = accessGroup.getHostsToConnect(); + for (HostVO host : hosts) { + host.getStorageIpAddress() + } + + + exportPolicy.setRules(rules); + ExportPolicy createExportPolicy = createExportPolicy(svmName, exportPolicy); + + + + + // attach export policy to volume of storage pool 
return null; } @@ -125,36 +158,36 @@ void disableLogicalAccess(Map values) { } - private ExportPolicy createExportPolicy(String svmName, String policyName) { - s_logger.info("Creating export policy: {} for SVM: {}", policyName, svmName); + private ExportPolicy createExportPolicy(String svmName, ExportPolicy policy) { + s_logger.info("Creating export policy: {} for SVM: {}", policy, svmName); try { String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); - // Create ExportPolicy object - ExportPolicy exportPolicy = new ExportPolicy(); - exportPolicy.setName(policyName); - - // Set SVM - Svm svm = new Svm(); - svm.setName(svmName); - exportPolicy.setSvm(svm); +// // Create ExportPolicy object +// ExportPolicy exportPolicy = new ExportPolicy(); +// exportPolicy.setName(policyName); +// +// // Set SVM +// Svm svm = new Svm(); +// svm.setName(svmName); +// exportPolicy.setSvm(svm); // Create export policy - ExportPolicy createdPolicy = nasFeignClient.createExportPolicy(authHeader, exportPolicy); + ExportPolicy createdPolicy = nasFeignClient.createExportPolicy(authHeader, policy); if (createdPolicy != null && createdPolicy.getId() != null) { s_logger.info("Export policy created successfully with ID: {}", createdPolicy.getId()); return createdPolicy; } else { - throw new CloudRuntimeException("Failed to create export policy: " + policyName); + throw new CloudRuntimeException("Failed to create export policy: " + policy); } } catch (FeignException e) { - s_logger.error("Failed to create export policy: {}", policyName, e); + s_logger.error("Failed to create export policy: {}", policy, e); throw new CloudRuntimeException("Failed to create export policy: " + e.getMessage()); } catch (Exception e) { - s_logger.error("Exception while creating export policy: {}", policyName, e); + s_logger.error("Exception while creating export policy: {}", policy, e); throw new CloudRuntimeException("Failed to create export policy: " + e.getMessage()); } } From 09caf90980b9d843a4a839d10da4cf69667acf9d Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Mon, 10 Nov 2025 19:29:16 +0530 Subject: [PATCH 4/9] creatacessgroup for NFS impl --- .../storage/feign/client/NASFeignClient.java | 6 +- .../storage/feign/model/ExportRule.java | 23 +++ .../OntapPrimaryDatastoreLifecycle.java | 11 +- .../storage/service/UnifiedNASStrategy.java | 147 ++++++++++++------ 4 files changed, 129 insertions(+), 58 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java index 339962cad25e..58280047e3fd 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java @@ -58,16 +58,16 @@ void createFile(@Param("authHeader") String authHeader, // Export Policy Operations @RequestLine("POST /") @Headers({"Authorization: {authHeader}"}) - ExportPolicy createExportPolicy(@Param("authHeader") String authHeader, + void createExportPolicy(@Param("authHeader") String authHeader, ExportPolicy exportPolicy); @RequestLine("GET /") @Headers({"Authorization: {authHeader}"}) - OntapResponse getExportPolicyResponse(@Param("authHeader") String authHeader); + ExportPolicy getExportPolicyResponse(@Param("authHeader") String authHeader); @RequestLine("GET /{id}") 
@Headers({"Authorization: {authHeader}"}) - OntapResponse getExportPolicyById(@Param("authHeader") String authHeader, + ExportPolicy getExportPolicyById(@Param("authHeader") String authHeader, @Param("id") String id); @RequestLine("DELETE /{id}") diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportRule.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportRule.java index 8f3c9597dca7..769f94836b31 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportRule.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportRule.java @@ -76,6 +76,13 @@ public static ProtocolsEnum fromValue(String text) { @JsonProperty("protocols") private List protocols = null; + @JsonProperty("ro_rule") + private List roRule = null; + + @JsonProperty("rw_rule") + private List rwRule = null; + + public ExportRule anonymousUser(String anonymousUser) { this.anonymousUser = anonymousUser; return this; @@ -140,6 +147,22 @@ public void setMatch (String match) { } } + public List getRwRule() { + return rwRule; + } + + public void setRwRule(List rwRule) { + this.rwRule = rwRule; + } + + public List getRoRule() { + return roRule; + } + + public void setRoRule(List roRule) { + this.roRule = roRule; + } + @Override public String toString() { StringBuilder sb = new StringBuilder(); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index 784f7ab6ed02..838aece8201e 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -42,7 +42,6 @@ import org.apache.cloudstack.storage.datastore.lifecycle.BasePrimaryDataStoreLifeCycleImpl; import org.apache.cloudstack.storage.feign.model.ExportPolicy; import org.apache.cloudstack.storage.feign.model.OntapStorage; -import org.apache.cloudstack.storage.feign.model.Svm; import org.apache.cloudstack.storage.feign.model.Volume; import org.apache.cloudstack.storage.provider.StorageProviderFactory; import org.apache.cloudstack.storage.service.StorageStrategy; @@ -271,13 +270,11 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { Map details = storagePoolDetailsDao.listDetailsKeyPairs(primaryStore.getId()); StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details); - Svm svm = new Svm(); - svm.setName(details.get(Constants.SVM_NAME)); ExportPolicy exportPolicy = new ExportPolicy(); - exportPolicy.setSvm(svm); AccessGroup accessGroupRequest = new AccessGroup(); accessGroupRequest.setHostsToConnect(hostsToConnect); accessGroupRequest.setScope(scope); + primaryStore.setDetails(details);// setting details as it does not come from cloudstack accessGroupRequest.setPrimaryDataStoreInfo(primaryStore); accessGroupRequest.setPolicy(exportPolicy); strategy.createAccessGroup(accessGroupRequest); @@ -308,13 +305,17 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), Hypervisor.HypervisorType.KVM); logger.debug(String.format("In createPool. 
Attaching the pool to each of the hosts in %s.", hostsToConnect)); - Map details = primaryStore.getDetails(); // TODO check while testing , if it is populated we can remove below db call + Map details = storagePoolDetailsDao.listDetailsKeyPairs(primaryStore.getId()); StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details); + ExportPolicy exportPolicy = new ExportPolicy(); AccessGroup accessGroupRequest = new AccessGroup(); accessGroupRequest.setHostsToConnect(hostsToConnect); accessGroupRequest.setScope(scope); + primaryStore.setDetails(details); // setting details as it does not come from cloudstack accessGroupRequest.setPrimaryDataStoreInfo(primaryStore); + accessGroupRequest.setPolicy(exportPolicy); strategy.createAccessGroup(accessGroupRequest); + for (HostVO host : hostsToConnect) { // TODO: Fetch the host IQN and add to the initiator group on ONTAP cluster try { diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java index 2df144193035..1212d75903c7 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java @@ -22,16 +22,20 @@ import com.cloud.host.HostVO; import com.cloud.utils.exception.CloudRuntimeException; import feign.FeignException; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.feign.FeignClientFactory; +import org.apache.cloudstack.storage.feign.client.JobFeignClient; import org.apache.cloudstack.storage.feign.client.NASFeignClient; import org.apache.cloudstack.storage.feign.client.VolumeFeignClient; import org.apache.cloudstack.storage.feign.model.ExportPolicy; import org.apache.cloudstack.storage.feign.model.ExportRule; import org.apache.cloudstack.storage.feign.model.FileInfo; +import org.apache.cloudstack.storage.feign.model.Job; import org.apache.cloudstack.storage.feign.model.Nas; import org.apache.cloudstack.storage.feign.model.OntapStorage; import org.apache.cloudstack.storage.feign.model.Svm; import org.apache.cloudstack.storage.feign.model.Volume; +import org.apache.cloudstack.storage.feign.model.response.JobResponse; import org.apache.cloudstack.storage.feign.model.response.OntapResponse; import org.apache.cloudstack.storage.service.model.AccessGroup; import org.apache.cloudstack.storage.service.model.CloudStackVolume; @@ -40,6 +44,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import javax.inject.Inject; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -50,6 +55,9 @@ public class UnifiedNASStrategy extends NASStrategy { private final FeignClientFactory feignClientFactory; private final NASFeignClient nasFeignClient; private final VolumeFeignClient volumeFeignClient; + private final JobFeignClient jobFeignClient; + @Inject + private StoragePoolDetailsDao storagePoolDetailsDao; public UnifiedNASStrategy(OntapStorage ontapStorage) { super(ontapStorage); @@ -58,6 +66,7 @@ public UnifiedNASStrategy(OntapStorage ontapStorage) { this.feignClientFactory = new FeignClientFactory(); this.nasFeignClient = feignClientFactory.createClient(NASFeignClient.class, baseURL); this.volumeFeignClient = feignClientFactory.createClient(VolumeFeignClient.class, baseURL); + this.jobFeignClient = 
feignClientFactory.createClient(JobFeignClient.class, baseURL); } public void setOntapStorage(OntapStorage ontapStorage) { @@ -99,35 +108,54 @@ CloudStackVolume getCloudStackVolume(CloudStackVolume cloudstackVolume) { @Override public AccessGroup createAccessGroup(AccessGroup accessGroup) { - // Create the export policy - String svmName = accessGroup.getPolicy().getSvm().getName(); - String exportPolicyName = "export-" + svmName + "-" + accessGroup.getPrimaryDataStoreInfo().getName(); + Map details = accessGroup.getPrimaryDataStoreInfo().getDetails(); + String svmName = details.get(Constants.SVM_NAME); + String volumeUUID = details.get(Constants.VOLUME_UUID); + String volumeName = details.get(Constants.VOLUME_NAME); + String exportPolicyName = "export-" + svmName + "-" + volumeName;// TODO move this to util ExportPolicy exportPolicy = new ExportPolicy(); - exportPolicy.setName(exportPolicyName); - - Svm svm = new Svm(); - svm.setName(svmName); - exportPolicy.setSvm(svm); List rules = new ArrayList<>(); ExportRule exportRule = new ExportRule(); + List exportClients = new ArrayList<>(); List hosts = accessGroup.getHostsToConnect(); for (HostVO host : hosts) { - host.getStorageIpAddress() + String hostStorageIp = host.getStorageIpAddress(); + String ip = (hostStorageIp != null && !hostStorageIp.isEmpty()) + ? hostStorageIp + : host.getPrivateIpAddress(); + String ipToUse = ip + "/32"; + ExportRule.ExportClient exportClient = new ExportRule.ExportClient(); + exportClient.setMatch(ipToUse); + exportClients.add(exportClient); } + exportRule.setClients(exportClients); + exportRule.setProtocols(List.of(ExportRule.ProtocolsEnum.any)); + exportRule.setRoRule(List.of("any")); + exportRule.setRwRule(List.of("any")); + rules.add(exportRule); - + Svm svm = new Svm(); + svm.setName(svmName); + exportPolicy.setSvm(svm); exportPolicy.setRules(rules); - ExportPolicy createExportPolicy = createExportPolicy(svmName, exportPolicy); - - - - - // attach export policy to volume of storage pool - return null; + exportPolicy.setName(exportPolicyName); + try { + createExportPolicy(svmName, exportPolicy); + s_logger.info("ExportPolicy created: {}, now attaching this policy to storage pool volume", exportPolicy.getName()); + + // attach export policy to volume of storage pool + assignExportPolicyToVolume(volumeUUID,exportPolicy.getName()); + s_logger.info("Successfully assigned exportPolicy {} to volume {}", exportPolicy.getName(), volumeName); + accessGroup.setPolicy(exportPolicy); + return accessGroup; + }catch (Exception e){ + s_logger.error("Exception occurred while creating access group: " + e); + throw new CloudRuntimeException("Failed to create access group: " + e); + } } @Override @@ -158,31 +186,13 @@ void disableLogicalAccess(Map values) { } - private ExportPolicy createExportPolicy(String svmName, ExportPolicy policy) { + private void createExportPolicy(String svmName, ExportPolicy policy) { s_logger.info("Creating export policy: {} for SVM: {}", policy, svmName); try { String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); - -// // Create ExportPolicy object -// ExportPolicy exportPolicy = new ExportPolicy(); -// exportPolicy.setName(policyName); -// -// // Set SVM -// Svm svm = new Svm(); -// svm.setName(svmName); -// exportPolicy.setSvm(svm); - - // Create export policy - ExportPolicy createdPolicy = nasFeignClient.createExportPolicy(authHeader, policy); - - if (createdPolicy != null && createdPolicy.getId() != null) { - s_logger.info("Export policy created 
successfully with ID: {}", createdPolicy.getId()); - return createdPolicy; - } else { - throw new CloudRuntimeException("Failed to create export policy: " + policy); - } - + nasFeignClient.createExportPolicy(authHeader, policy); + s_logger.info("Export policy created successfully with name {}", policy.getName()); } catch (FeignException e) { s_logger.error("Failed to create export policy: {}", policy, e); throw new CloudRuntimeException("Failed to create export policy: " + e.getMessage()); @@ -192,17 +202,16 @@ private ExportPolicy createExportPolicy(String svmName, ExportPolicy policy) { } } - private void deleteExportPolicy(String svmName, String policyName) { try { String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); - OntapResponse policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader); + ExportPolicy policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader); - if (policiesResponse.getRecords() == null || policiesResponse.getRecords().isEmpty()) { + if (policiesResponse == null ) { s_logger.warn("Export policy not found for deletion: {}", policyName); throw new CloudRuntimeException("Export policy not found : " + policyName); } - String policyId = policiesResponse.getRecords().get(0).getId().toString(); + String policyId = policiesResponse.getId().toString(); nasFeignClient.deleteExportPolicyById(authHeader, policyId); s_logger.info("Export policy deleted successfully: {}", policyName); } catch (Exception e) { @@ -216,26 +225,64 @@ private String addExportRule(String policyName, String clientMatch, String[] pro return ""; } - private String assignExportPolicyToVolume(String volumeUuid, String policyName) { + private void assignExportPolicyToVolume(String volumeUuid, String policyName) { s_logger.info("Assigning export policy: {} to volume: {}", policyName, volumeUuid); try { String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); - OntapResponse policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader); - if (policiesResponse.getRecords() == null || policiesResponse.getRecords().isEmpty()) { + ExportPolicy policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader); + if (policiesResponse == null) { throw new CloudRuntimeException("Export policy not found: " + policyName); } - ExportPolicy exportPolicy = policiesResponse.getRecords().get(0); // Create Volume update object with NAS configuration Volume volumeUpdate = new Volume(); Nas nas = new Nas(); - nas.setExportPolicy(exportPolicy); + ExportPolicy policy = new ExportPolicy(); + policy.setName(policiesResponse.getName()); + nas.setExportPolicy(policy); volumeUpdate.setNas(nas); - volumeFeignClient.updateVolumeRebalancing(authHeader, volumeUuid, volumeUpdate); - s_logger.info("Export policy successfully assigned to volume: {}", volumeUuid); - return "Export policy " + policyName + " assigned to volume " + volumeUuid; + try { + /* + ONTAP creates a default rule for 0.0.0.0 if no export rules are defined while creating the volume, + and since CloudStack is not aware of the hosts at storage pool creation time, we can either create a default or + permissive rule here and update it later as part of the attachCluster or attachZone implementation + */ + JobResponse jobResponse = volumeFeignClient.updateVolumeRebalancing(authHeader, volumeUuid, volumeUpdate); + if (jobResponse == null || jobResponse.getJob() == null) { + throw new CloudRuntimeException("Failed to attach policy " + policiesResponse.getName() + " to volume " + volumeUuid);
+ } + String jobUUID = jobResponse.getJob().getUuid(); + + //Create URI for GET Job API + int jobRetryCount = 0; + Job createVolumeJob = null; + while(createVolumeJob == null || !createVolumeJob.getState().equals(Constants.JOB_SUCCESS)) { + if(jobRetryCount >= Constants.JOB_MAX_RETRIES) { + s_logger.error("Job to update volume " + volumeUuid + " did not complete within expected time."); + throw new CloudRuntimeException("Job to update volume " + volumeUuid + " did not complete within expected time."); + } + + try { + createVolumeJob = jobFeignClient.getJobByUUID(authHeader, jobUUID); + if (createVolumeJob == null) { + s_logger.warn("Job with UUID " + jobUUID + " not found. Retrying..."); + } else if (createVolumeJob.getState().equals(Constants.JOB_FAILURE)) { + throw new CloudRuntimeException("Job to update volume " + volumeUuid + " failed with error: " + createVolumeJob.getMessage()); + } + } catch (FeignException.FeignClientException e) { + throw new CloudRuntimeException("Failed to fetch job status: " + e.getMessage()); + } + + jobRetryCount++; + Thread.sleep(Constants.CREATE_VOLUME_CHECK_SLEEP_TIME); // Sleep for 2 seconds before polling again + } + } catch (Exception e) { + s_logger.error("Exception while updating volume: ", e); + throw new CloudRuntimeException("Failed to update volume: " + e.getMessage()); + } + s_logger.info("Export policy successfully assigned to volume: {}", volumeUuid); } catch (FeignException e) { s_logger.error("Failed to assign export policy to volume: {}", volumeUuid, e); throw new CloudRuntimeException("Failed to assign export policy: " + e.getMessage()); From aa14ab5648a5a9fd4ca38d963999169b5d6c9435 Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Fri, 14 Nov 2025 10:22:28 +0530 Subject: [PATCH 5/9] storage pool mounting on host --- .../storage/feign/client/NASFeignClient.java | 23 +-- .../OntapPrimaryDatastoreLifecycle.java | 9 +- .../storage/listener/OntapHostListener.java | 168 ++++++++++++++++++ .../storage/provider/OntapHostListener.java | 58 ------ .../OntapPrimaryDatastoreProvider.java | 7 +- .../storage/service/StorageStrategy.java | 6 + .../storage/service/UnifiedNASStrategy.java | 122 ++++++++----- .../cloudstack/storage/utils/Constants.java | 2 + 8 files changed, 275 insertions(+), 120 deletions(-) create mode 100644 plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java delete mode 100755 plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java index 58280047e3fd..f48f83dc28de 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.storage.feign.client; +import feign.QueryMap; import org.apache.cloudstack.storage.feign.model.ExportPolicy; import org.apache.cloudstack.storage.feign.model.FileInfo; import org.apache.cloudstack.storage.feign.model.response.OntapResponse; @@ -26,29 +27,31 @@ import feign.Param; import feign.RequestLine; +import java.util.Map; + public interface NASFeignClient { // File Operations - @RequestLine("GET /{volumeUuid}/files/{path}") + @RequestLine("GET 
/api/storage/volumes/{volumeUuid}/files/{path}") @Headers({"Authorization: {authHeader}"}) OntapResponse getFileResponse(@Param("authHeader") String authHeader, @Param("volumeUuid") String volumeUUID, @Param("path") String filePath); - @RequestLine("DELETE /{volumeUuid}/files/{path}") + @RequestLine("DELETE /api/storage/volumes/{volumeUuid}/files/{path}") @Headers({"Authorization: {authHeader}"}) void deleteFile(@Param("authHeader") String authHeader, @Param("volumeUuid") String volumeUUID, @Param("path") String filePath); - @RequestLine("PATCH /{volumeUuid}/files/{path}") + @RequestLine("PATCH /api/storage/volumes/{volumeUuid}/files/{path}") @Headers({"Authorization: {authHeader}"}) void updateFile(@Param("authHeader") String authHeader, @Param("volumeUuid") String volumeUUID, @Param("path") String filePath, FileInfo fileInfo); - @RequestLine("POST /{volumeUuid}/files/{path}") + @RequestLine("POST /api/storage/volumes/{volumeUuid}/files/{path}") @Headers({"Authorization: {authHeader}"}) void createFile(@Param("authHeader") String authHeader, @Param("volumeUuid") String volumeUUID, @@ -56,26 +59,26 @@ void createFile(@Param("authHeader") String authHeader, FileInfo file); // Export Policy Operations - @RequestLine("POST /") + @RequestLine("POST /api/protocols/nfs/export-policies") @Headers({"Authorization: {authHeader}"}) void createExportPolicy(@Param("authHeader") String authHeader, ExportPolicy exportPolicy); - @RequestLine("GET /") + @RequestLine("GET /api/protocols/nfs/export-policies") @Headers({"Authorization: {authHeader}"}) - ExportPolicy getExportPolicyResponse(@Param("authHeader") String authHeader); + OntapResponse getExportPolicyResponse(@Param("authHeader") String authHeader, @QueryMap Map queryMap); - @RequestLine("GET /{id}") + @RequestLine("GET /api/protocols/nfs/export-policies/{id}") @Headers({"Authorization: {authHeader}"}) ExportPolicy getExportPolicyById(@Param("authHeader") String authHeader, @Param("id") String id); - @RequestLine("DELETE /{id}") + @RequestLine("DELETE /api/protocols/nfs/export-policies/{id}") @Headers({"Authorization: {authHeader}"}) void deleteExportPolicyById(@Param("authHeader") String authHeader, @Param("id") String id); - @RequestLine("PATCH /{id}") + @RequestLine("PATCH /api/protocols/nfs/export-policies/{id}") @Headers({"Authorization: {authHeader}"}) OntapResponse updateExportPolicy(@Param("authHeader") String authHeader, @Param("id") String id, diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index 838aece8201e..17be8d9508d1 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -188,17 +188,22 @@ public DataStore initialize(Map dsInfos) { // Determine storage pool type and path based on protocol String path; + String host = ""; ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL)); switch (protocol) { case NFS: parameters.setType(Storage.StoragePoolType.NetworkFilesystem); - path = details.get(Constants.MANAGEMENT_LIF) + ":/" + storagePoolName; + // Path should be just the NFS export path (junction path), NOT host:path + // CloudStack will construct the full mount path as: hostAddress + ":" + path + path = "/" + 
storagePoolName; s_logger.info("Setting NFS path for storage pool: " + path); + host = "10.193.192.136"; // TODO hardcoded for now break; case ISCSI: parameters.setType(Storage.StoragePoolType.Iscsi); path = "iqn.1992-08.com.netapp:" + details.get(Constants.SVM_NAME) + "." + storagePoolName; s_logger.info("Setting iSCSI path for storage pool: " + path); + parameters.setHost(details.get(Constants.MANAGEMENT_LIF)); break; default: throw new CloudRuntimeException("Unsupported protocol: " + protocol + ", cannot create primary storage"); @@ -239,8 +244,8 @@ public DataStore initialize(Map dsInfos) { } // Set parameters for primary data store - parameters.setHost(details.get(Constants.MANAGEMENT_LIF)); parameters.setPort(Constants.ONTAP_PORT); + parameters.setHost(host); parameters.setPath(path); parameters.setTags(tags != null ? tags : ""); parameters.setIsTagARule(isTagARule != null ? isTagARule : Boolean.FALSE); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java new file mode 100644 index 000000000000..cf9cd5510ce0 --- /dev/null +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java @@ -0,0 +1,168 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.storage.listener; + +import javax.inject.Inject; + +import com.cloud.agent.api.ModifyStoragePoolCommand; +import com.cloud.alert.AlertManager; +import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.dao.StoragePoolHostDao; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.DeleteStoragePoolCommand; +import com.cloud.host.Host; +import com.cloud.storage.StoragePool; +import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import com.cloud.host.dao.HostDao; + +/** + * HypervisorHostListener implementation for ONTAP storage. + * Handles connecting/disconnecting hosts to/from ONTAP-backed storage pools. 
+ */ +public class OntapHostListener implements HypervisorHostListener { + protected Logger logger = LogManager.getLogger(getClass()); + + @Inject + private AgentManager _agentMgr; + @Inject + private AlertManager _alertMgr; + @Inject + private PrimaryDataStoreDao _storagePoolDao; + @Inject + private HostDao _hostDao; + @Inject private StoragePoolHostDao storagePoolHostDao; + + + @Override + public boolean hostConnect(long hostId, long poolId) { + logger.info("Connect to host " + hostId + " from pool " + poolId); + Host host = _hostDao.findById(hostId); + if (host == null) { + logger.error("Failed to add host by HostListener as host was not found with id : {}", hostId); + return false; + } + + // TODO add host type check also since we support only KVM for now, host.getHypervisorType().equals(HypervisorType.KVM) + StoragePool pool = _storagePoolDao.findById(poolId); + logger.info("Connecting host {} to ONTAP storage pool {}", host.getName(), pool.getName()); + + + // incase host was not added by cloudstack , we will add it + StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(poolId, hostId); + + if (storagePoolHost == null) { + storagePoolHost = new StoragePoolHostVO(poolId, hostId, ""); + + storagePoolHostDao.persist(storagePoolHost); + } + + // Validate pool type - ONTAP supports NFS and iSCSI +// StoragePoolType poolType = pool.getPoolType(); +// // TODO add iscsi also here +// if (poolType != StoragePoolType.NetworkFilesystem) { +// logger.error("Unsupported pool type {} for ONTAP storage", poolType); +// return false; +// } + + try { + // Create the CreateStoragePoolCommand to send to the agent + ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool); + + Answer answer = _agentMgr.easySend(hostId, cmd); + + if (answer == null) { + throw new CloudRuntimeException(String.format("Unable to get an answer to the modify storage pool command (%s)", pool)); + } + + if (!answer.getResult()) { + String msg = String.format("Unable to attach storage pool %s to host %d", pool, hostId); + + _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, pool.getDataCenterId(), pool.getPodId(), msg, msg); + + throw new CloudRuntimeException(String.format( + "Unable to establish a connection from agent to storage pool %s due to %s", pool, answer.getDetails())); + } + } catch (Exception e) { + logger.error("Exception while connecting host {} to storage pool {}", host.getName(), pool.getName(), e); + throw new CloudRuntimeException("Failed to connect host to storage pool: " + e.getMessage(), e); + } + return true; + } + + @Override + public boolean hostDisconnected(Host host, StoragePool pool) { + logger.info("Disconnect from host " + host.getId() + " from pool " + pool.getName()); + + Host hostToremove = _hostDao.findById(host.getId()); + if (hostToremove == null) { + logger.error("Failed to add host by HostListener as host was not found with id : {}", host.getId()); + return false; + } + // TODO add storage pool get validation + logger.info("Disconnecting host {} from ONTAP storage pool {}", host.getName(), pool.getName()); + + try { + DeleteStoragePoolCommand cmd = new DeleteStoragePoolCommand(pool); + long hostId = host.getId(); + Answer answer = _agentMgr.easySend(hostId, cmd); + + if (answer != null && answer.getResult()) { + logger.info("Successfully disconnected host {} from ONTAP storage pool {}", host.getName(), pool.getName()); + return true; + } else { + String errMsg = (answer != null) ? 
answer.getDetails() : "Unknown error"; + logger.warn("Failed to disconnect host {} from storage pool {}. Error: {}", host.getName(), pool.getName(), errMsg); + return false; + } + } catch (Exception e) { + logger.error("Exception while disconnecting host {} from storage pool {}", host.getName(), pool.getName(), e); + return false; + } + } + + @Override + public boolean hostDisconnected(long hostId, long poolId) { + return false; + } + + @Override + public boolean hostAboutToBeRemoved(long hostId) { + return false; + } + + @Override + public boolean hostRemoved(long hostId, long clusterId) { + return false; + } + + @Override + public boolean hostEnabled(long hostId) { + return false; + } + + @Override + public boolean hostAdded(long hostId) { + return false; + } + +} diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java deleted file mode 100755 index 3fb6406aa9cc..000000000000 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapHostListener.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.cloudstack.storage.provider; -// -//import com.cloud.exception.StorageConflictException; -//import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; -// -//public class OntapHostListener implements HypervisorHostListener { -// -// public OntapHostListener(){} -// -// @Override -// public boolean hostAdded(long hostId) { -// return false; -// } -// -// @Override -// public boolean hostConnect(long hostId, long poolId) throws StorageConflictException { -// return false; -// } -// -// @Override -// public boolean hostDisconnected(long hostId, long poolId) { -// return false; -// } -// -// @Override -// public boolean hostAboutToBeRemoved(long hostId) { -// return false; -// } -// -// @Override -// public boolean hostRemoved(long hostId, long clusterId) { -// return false; -// } -// -// @Override -// public boolean hostEnabled(long hostId) { -// return false; -// } -//} \ No newline at end of file diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java index d954d1d413cf..4079792f87d8 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java @@ -27,6 +27,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; import org.apache.cloudstack.storage.driver.OntapPrimaryDatastoreDriver; import org.apache.cloudstack.storage.lifecycle.OntapPrimaryDatastoreLifecycle; +import org.apache.cloudstack.storage.listener.OntapHostListener; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.springframework.stereotype.Component; @@ -41,7 +42,7 @@ public class OntapPrimaryDatastoreProvider implements PrimaryDataStoreProvider { private static final Logger s_logger = LogManager.getLogger(OntapPrimaryDatastoreProvider.class); private OntapPrimaryDatastoreDriver primaryDatastoreDriver; private OntapPrimaryDatastoreLifecycle primaryDatastoreLifecycle; - // private HypervisorHostListener listener; + private HypervisorHostListener listener; public OntapPrimaryDatastoreProvider() { s_logger.info("OntapPrimaryDatastoreProvider initialized"); @@ -58,7 +59,7 @@ public DataStoreDriver getDataStoreDriver() { @Override public HypervisorHostListener getHostListener() { - return null; + return listener; } @Override @@ -72,7 +73,7 @@ public boolean configure(Map params) { s_logger.trace("OntapPrimaryDatastoreProvider: configure: Called"); primaryDatastoreDriver = ComponentContext.inject(OntapPrimaryDatastoreDriver.class); primaryDatastoreLifecycle = ComponentContext.inject(OntapPrimaryDatastoreLifecycle.class); - // listener = ComponentContext.inject(OntapHostListener.class); + listener = ComponentContext.inject(OntapHostListener.class); return true; } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java index 045cf23af67f..b11c60e63385 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java @@ 
-27,6 +27,7 @@ import org.apache.cloudstack.storage.feign.client.VolumeFeignClient; import org.apache.cloudstack.storage.feign.model.Aggregate; import org.apache.cloudstack.storage.feign.model.Job; +import org.apache.cloudstack.storage.feign.model.Nas; import org.apache.cloudstack.storage.feign.model.OntapStorage; import org.apache.cloudstack.storage.feign.model.Svm; import org.apache.cloudstack.storage.feign.model.Volume; @@ -150,10 +151,15 @@ public Volume createStorageVolume(String volumeName, Long size) { Svm svm = new Svm(); svm.setName(svmName); + Nas nas = new Nas(); + nas.setPath("/" + volumeName); + volumeRequest.setName(volumeName); volumeRequest.setSvm(svm); volumeRequest.setAggregates(aggregates); volumeRequest.setSize(size); + volumeRequest.setNas(nas); // be default if we don't set path , ONTAP create a volume with mount/junction path // TODO check if we need to append svm name or not + // since storage pool also cannot be duplicate so junction path can also be not duplicate so /volumeName will always be unique // Make the POST API call to create the volume try { /* diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java index 1212d75903c7..4672edd22033 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java @@ -64,9 +64,10 @@ public UnifiedNASStrategy(OntapStorage ontapStorage) { String baseURL = Constants.HTTPS + ontapStorage.getManagementLIF(); // Initialize FeignClientFactory and create NAS client this.feignClientFactory = new FeignClientFactory(); + // NAS client uses export policy API endpoint this.nasFeignClient = feignClientFactory.createClient(NASFeignClient.class, baseURL); - this.volumeFeignClient = feignClientFactory.createClient(VolumeFeignClient.class, baseURL); - this.jobFeignClient = feignClientFactory.createClient(JobFeignClient.class, baseURL); + this.volumeFeignClient = feignClientFactory.createClient(VolumeFeignClient.class,baseURL ); + this.jobFeignClient = feignClientFactory.createClient(JobFeignClient.class, baseURL ); } public void setOntapStorage(OntapStorage ontapStorage) { @@ -108,51 +109,23 @@ CloudStackVolume getCloudStackVolume(CloudStackVolume cloudstackVolume) { @Override public AccessGroup createAccessGroup(AccessGroup accessGroup) { - // Create the export policy Map details = accessGroup.getPrimaryDataStoreInfo().getDetails(); String svmName = details.get(Constants.SVM_NAME); String volumeUUID = details.get(Constants.VOLUME_UUID); String volumeName = details.get(Constants.VOLUME_NAME); - String exportPolicyName = "export-" + svmName + "-" + volumeName;// TODO move this to util - - ExportPolicy exportPolicy = new ExportPolicy(); - - List rules = new ArrayList<>(); - ExportRule exportRule = new ExportRule(); - - List exportClients = new ArrayList<>(); - List hosts = accessGroup.getHostsToConnect(); - for (HostVO host : hosts) { - String hostStorageIp = host.getStorageIpAddress(); - String ip = (hostStorageIp != null && !hostStorageIp.isEmpty()) - ? 
hostStorageIp - : host.getPrivateIpAddress(); - String ipToUse = ip + "/32"; - ExportRule.ExportClient exportClient = new ExportRule.ExportClient(); - exportClient.setMatch(ipToUse); - exportClients.add(exportClient); - } - exportRule.setClients(exportClients); - exportRule.setProtocols(List.of(ExportRule.ProtocolsEnum.any)); - exportRule.setRoRule(List.of("any")); - exportRule.setRwRule(List.of("any")); - rules.add(exportRule); - Svm svm = new Svm(); - svm.setName(svmName); - exportPolicy.setSvm(svm); - exportPolicy.setRules(rules); - exportPolicy.setName(exportPolicyName); + // Create the export policy + ExportPolicy policyRequest = createExportPolicyRequest(accessGroup,svmName,volumeName); try { - createExportPolicy(svmName, exportPolicy); - s_logger.info("ExportPolicy created: {}, now attaching this policy to storage pool volume", exportPolicy.getName()); + createExportPolicy(svmName, policyRequest); + s_logger.info("ExportPolicy created: {}, now attaching this policy to storage pool volume", policyRequest.getName()); // attach export policy to volume of storage pool - assignExportPolicyToVolume(volumeUUID,exportPolicy.getName()); - s_logger.info("Successfully assigned exportPolicy {} to volume {}", exportPolicy.getName(), volumeName); - accessGroup.setPolicy(exportPolicy); + assignExportPolicyToVolume(volumeUUID,policyRequest.getName()); + s_logger.info("Successfully assigned exportPolicy {} to volume {}", policyRequest.getName(), volumeName); + accessGroup.setPolicy(policyRequest); return accessGroup; - }catch (Exception e){ + }catch(Exception e){ s_logger.error("Exception occurred while creating access group: " + e); throw new CloudRuntimeException("Failed to create access group: " + e); } @@ -192,6 +165,18 @@ private void createExportPolicy(String svmName, ExportPolicy policy) { try { String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); nasFeignClient.createExportPolicy(authHeader, policy); + try { + Map queryParams = Map.of(Constants.NAME, policy.getName()); + OntapResponse policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader, queryParams); + if (policiesResponse == null || policiesResponse.getRecords().isEmpty()) { + throw new CloudRuntimeException("Export policy " + policy.getName() + " was not created on ONTAP. 
" + + "Received successful response but policy does not exist."); + } + s_logger.info("Export policy created and verified successfully: " + policy.getName()); + } catch (FeignException e) { + s_logger.error("Failed to verify export policy creation: " + policy.getName(), e); + throw new CloudRuntimeException("Export policy creation verification failed: " + e.getMessage()); + } s_logger.info("Export policy created successfully with name {}", policy.getName()); } catch (FeignException e) { s_logger.error("Failed to create export policy: {}", policy, e); @@ -205,13 +190,14 @@ private void createExportPolicy(String svmName, ExportPolicy policy) { private void deleteExportPolicy(String svmName, String policyName) { try { String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); - ExportPolicy policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader); + Map queryParams = Map.of(Constants.NAME, policyName); + OntapResponse policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader, queryParams); if (policiesResponse == null ) { s_logger.warn("Export policy not found for deletion: {}", policyName); throw new CloudRuntimeException("Export policy not found : " + policyName); } - String policyId = policiesResponse.getId().toString(); + String policyId = String.valueOf(policiesResponse.getRecords().get(0).getId()); nasFeignClient.deleteExportPolicyById(authHeader, policyId); s_logger.info("Export policy deleted successfully: {}", policyName); } catch (Exception e) { @@ -230,15 +216,18 @@ private void assignExportPolicyToVolume(String volumeUuid, String policyName) { try { String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); - ExportPolicy policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader); - if (policiesResponse == null) { + Map queryParams = Map.of(Constants.NAME, policyName); + OntapResponse policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader, queryParams); + if (policiesResponse == null || policiesResponse.getRecords().isEmpty()) { + s_logger.error("Export policy not found for assigning rule: {}", policyName); throw new CloudRuntimeException("Export policy not found: " + policyName); } + // Create Volume update object with NAS configuration Volume volumeUpdate = new Volume(); Nas nas = new Nas(); ExportPolicy policy = new ExportPolicy(); - policy.setName(policiesResponse.getName()); + policy.setName(policyName); nas.setExportPolicy(policy); volumeUpdate.setNas(nas); @@ -250,7 +239,7 @@ private void assignExportPolicyToVolume(String volumeUuid, String policyName) { */ JobResponse jobResponse = volumeFeignClient.updateVolumeRebalancing(authHeader, volumeUuid, volumeUpdate); if (jobResponse == null || jobResponse.getJob() == null) { - throw new CloudRuntimeException("Failed to attach policy " + policiesResponse.getName() + "to volume " + volumeUuid); + throw new CloudRuntimeException("Failed to attach policy " + policyName + "to volume " + volumeUuid); } String jobUUID = jobResponse.getJob().getUuid(); @@ -334,14 +323,14 @@ private OntapResponse getFileInfo(String volumeUuid, String filePath) OntapResponse response = nasFeignClient.getFileResponse(authHeader, volumeUuid, filePath); s_logger.debug("Retrieved file info for: {} in volume: {}", filePath, volumeUuid); return response; - } catch (FeignException e) { + } catch (FeignException e){ if (e.status() == 404) { s_logger.debug("File not found: {} in volume: {}", filePath, volumeUuid); return null; } 
s_logger.error("Failed to get file info: {} in volume: {}", filePath, volumeUuid, e); throw new CloudRuntimeException("Failed to get file info: " + e.getMessage()); - } catch (Exception e) { + } catch (Exception e){ s_logger.error("Exception while getting file info: {} in volume: {}", filePath, volumeUuid, e); throw new CloudRuntimeException("Failed to get file info: " + e.getMessage()); } @@ -358,9 +347,48 @@ private boolean updateFile(String volumeUuid, String filePath, FileInfo fileInfo } catch (FeignException e) { s_logger.error("Failed to update file: {} in volume: {}", filePath, volumeUuid, e); return false; - } catch (Exception e) { + } catch (Exception e){ s_logger.error("Exception while updating file: {} in volume: {}", filePath, volumeUuid, e); return false; } } + + private String generateExportPolicyName(String svmName, String volumeName){ + return Constants.EXPORT + Constants.HYPHEN + svmName + Constants.HYPHEN + volumeName; + } + + private ExportPolicy createExportPolicyRequest(AccessGroup accessGroup,String svmName , String volumeName){ + + String exportPolicyName = generateExportPolicyName(svmName,volumeName); + ExportPolicy exportPolicy = new ExportPolicy(); + + List rules = new ArrayList<>(); + ExportRule exportRule = new ExportRule(); + + List exportClients = new ArrayList<>(); + List hosts = accessGroup.getHostsToConnect(); + for (HostVO host : hosts) { + String hostStorageIp = host.getStorageIpAddress(); + String ip = (hostStorageIp != null && !hostStorageIp.isEmpty()) + ? hostStorageIp + : host.getPrivateIpAddress(); + String ipToUse = ip + "/32"; + ExportRule.ExportClient exportClient = new ExportRule.ExportClient(); + exportClient.setMatch(ipToUse); + exportClients.add(exportClient); + } + exportRule.setClients(exportClients); + exportRule.setProtocols(List.of(ExportRule.ProtocolsEnum.any)); + exportRule.setRoRule(List.of("any")); + exportRule.setRwRule(List.of("any")); + rules.add(exportRule); + + Svm svm = new Svm(); + svm.setName(svmName); + exportPolicy.setSvm(svm); + exportPolicy.setRules(rules); + exportPolicy.setName(exportPolicyName); + + return exportPolicy; + } } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java index a81fdb0a8ab5..a885179da05b 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java @@ -33,6 +33,7 @@ public class Constants { public static final String RUNNING = "running"; public static final String VOLUME_UUID = "volumeUUID"; public static final String VOLUME_NAME = "volumeNAME"; + public static final String EXPORT = "export"; public static final int ONTAP_PORT = 443; @@ -55,6 +56,7 @@ public class Constants { public static final String EQUALS = "="; public static final String SEMICOLON = ";"; public static final String COMMA = ","; + public static final String HYPHEN = "-"; public static final String VOLUME_PATH_PREFIX = "/vol/"; From f89bb1b0a6c0efe8d2ce0fd634b06c4b73c983ac Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Fri, 14 Nov 2025 13:47:43 +0530 Subject: [PATCH 6/9] storage pool mounting on host 1 --- .../driver/OntapPrimaryDatastoreDriver.java | 1 + .../storage/listener/OntapHostListener.java | 55 +++++++++++++------ .../storage/service/UnifiedNASStrategy.java | 2 +- 3 files changed, 39 insertions(+), 19 deletions(-) diff 
--git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index 979e761eb4b0..17a23aeec47f 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -116,6 +116,7 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet createCmdResult = new CreateCmdResult(null, new Answer(null, false, errMsg)); createCmdResult.setResult(e.toString()); } finally { + s_logger.info("createAsync: completed processing of the volume create request"); callback.complete(createCmdResult); } } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java index cf9cd5510ce0..1b24d22fd285 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java @@ -20,6 +20,8 @@ import javax.inject.Inject; import com.cloud.agent.api.ModifyStoragePoolCommand; +import com.cloud.agent.api.ModifyStoragePoolAnswer; +import com.cloud.agent.api.StoragePoolInfo; import com.cloud.alert.AlertManager; import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.dao.StoragePoolHostDao; @@ -32,6 +34,7 @@ import com.cloud.storage.StoragePool; import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; import com.cloud.host.dao.HostDao; @@ -67,25 +70,8 @@ public boolean hostConnect(long hostId, long poolId) { logger.info("Connecting host {} to ONTAP storage pool {}", host.getName(), pool.getName()); - - // incase host was not added by cloudstack , we will add it - StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(poolId, hostId); - - if (storagePoolHost == null) { - storagePoolHost = new StoragePoolHostVO(poolId, hostId, ""); - - storagePoolHostDao.persist(storagePoolHost); - } - - // Validate pool type - ONTAP supports NFS and iSCSI -// StoragePoolType poolType = pool.getPoolType(); -// // TODO add iscsi also here -// if (poolType != StoragePoolType.NetworkFilesystem) { -// logger.error("Unsupported pool type {} for ONTAP storage", poolType); -// return false; -// } - try { - // Create the CreateStoragePoolCommand to send to the agent + // Create the ModifyStoragePoolCommand to send to the agent ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool); Answer answer = _agentMgr.easySend(hostId, cmd); @@ -102,6 +88,39 @@ public boolean hostConnect(long hostId, long poolId) { throw new CloudRuntimeException(String.format( "Unable to establish a connection from agent to storage pool %s due to %s", pool, answer.getDetails())); } + + // Get the mount path from the answer + ModifyStoragePoolAnswer mspAnswer = (ModifyStoragePoolAnswer) answer; + StoragePoolInfo poolInfo = mspAnswer.getPoolInfo(); + if (poolInfo == null) { + throw new CloudRuntimeException("ModifyStoragePoolAnswer returned null poolInfo"); + }
+ + String localPath = poolInfo.getLocalPath(); + logger.info("Storage pool {} successfully mounted at: {}", pool.getName(), localPath); + + // Update or create the storage_pool_host_ref entry with the correct local_path + StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(poolId, hostId); + + if (storagePoolHost == null) { + storagePoolHost = new StoragePoolHostVO(poolId, hostId, localPath); + storagePoolHostDao.persist(storagePoolHost); + logger.info("Created storage_pool_host_ref entry for pool {} and host {}", pool.getName(), host.getName()); + } else { + storagePoolHost.setLocalPath(localPath); + storagePoolHostDao.update(storagePoolHost.getId(), storagePoolHost); + logger.info("Updated storage_pool_host_ref entry with local_path: {}", localPath); + } + + // Update pool capacity/usage information + StoragePoolVO poolVO = _storagePoolDao.findById(poolId); + if (poolVO != null && poolInfo.getCapacityBytes() > 0) { + poolVO.setCapacityBytes(poolInfo.getCapacityBytes()); + poolVO.setUsedBytes(poolInfo.getCapacityBytes() - poolInfo.getAvailableBytes()); + _storagePoolDao.update(poolVO.getId(), poolVO); + logger.info("Updated storage pool capacity: {} GB, used: {} GB", poolInfo.getCapacityBytes() / (1024 * 1024 * 1024), (poolInfo.getCapacityBytes() - poolInfo.getAvailableBytes()) / (1024 * 1024 * 1024)); + } + } catch (Exception e) { logger.error("Exception while connecting host {} to storage pool {}", host.getName(), pool.getName(), e); throw new CloudRuntimeException("Failed to connect host to storage pool: " + e.getMessage(), e); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java index 4672edd22033..883bd554b767 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java @@ -372,7 +372,7 @@ private ExportPolicy createExportPolicyRequest(AccessGroup accessGroup,String sv String ip = (hostStorageIp != null && !hostStorageIp.isEmpty()) ? 
hostStorageIp : host.getPrivateIpAddress(); - String ipToUse = ip + "/32"; + String ipToUse = ip + "/31"; // TODO since we have 2 IPs internal and external ExportRule.ExportClient exportClient = new ExportRule.ExportClient(); exportClient.setMatch(ipToUse); exportClients.add(exportClient); From fe19c55abfa8d60b68516e013919094257a4a362 Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Mon, 17 Nov 2025 17:00:56 +0530 Subject: [PATCH 7/9] vm restart issue --- .../storage/feign/model/ExportRule.java | 11 +++++++++++ .../OntapPrimaryDatastoreLifecycle.java | 2 ++ .../storage/listener/OntapHostListener.java | 18 ++++++++++++++++-- .../storage/service/UnifiedNASStrategy.java | 1 + 4 files changed, 30 insertions(+), 2 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportRule.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportRule.java index 769f94836b31..788fc8b5544d 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportRule.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportRule.java @@ -82,6 +82,9 @@ public static ProtocolsEnum fromValue(String text) { @JsonProperty("rw_rule") private List rwRule = null; + @JsonProperty("superuser") + private List superuser = null; + public ExportRule anonymousUser(String anonymousUser) { this.anonymousUser = anonymousUser; @@ -163,6 +166,14 @@ public void setRoRule(List roRule) { this.roRule = roRule; } + public List getSuperuser() { + return superuser; + } + + public void setSuperuser(List superuser) { + this.superuser = superuser; + } + @Override public String toString() { StringBuilder sb = new StringBuilder(); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index 17be8d9508d1..64a39056f31f 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -291,6 +291,7 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); + throw new CloudRuntimeException("Failed to attach storage pool to cluster: " + e.getMessage(), e); } } _dataStoreHelper.attachCluster(dataStore); @@ -327,6 +328,7 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); + throw new CloudRuntimeException("Failed to attach storage pool to host: " + e.getMessage(), e); } } _dataStoreHelper.attachZone(dataStore); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java index 1b24d22fd285..9db7774bc21b 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java +++ 
b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java @@ -67,9 +67,21 @@ public boolean hostConnect(long hostId, long poolId) { // TODO add host type check also since we support only KVM for now, host.getHypervisorType().equals(HypervisorType.KVM) StoragePool pool = _storagePoolDao.findById(poolId); + if (pool == null) { + logger.error("Failed to connect host - storage pool not found with id: {}", poolId); + return false; + } + + // CRITICAL: Check if already connected to avoid infinite loops + StoragePoolHostVO existingConnection = storagePoolHostDao.findByPoolHost(poolId, hostId); + if (existingConnection != null && existingConnection.getLocalPath() != null && !existingConnection.getLocalPath().isEmpty()) { + logger.info("Host {} is already connected to storage pool {} at path {}. Skipping reconnection.", + host.getName(), pool.getName(), existingConnection.getLocalPath()); + return true; + } + logger.info("Connecting host {} to ONTAP storage pool {}", host.getName(), pool.getName()); - try { // Create the ModifyStoragePoolCommand to send to the agent ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool); @@ -123,7 +135,9 @@ public boolean hostConnect(long hostId, long poolId) { } catch (Exception e) { logger.error("Exception while connecting host {} to storage pool {}", host.getName(), pool.getName(), e); - throw new CloudRuntimeException("Failed to connect host to storage pool: " + e.getMessage(), e); + // CRITICAL: Don't throw exception - it crashes the agent and causes restart loops + // Return false to indicate failure without crashing + return false; } return true; } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java index 883bd554b767..6044ca86230e 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java @@ -381,6 +381,7 @@ private ExportPolicy createExportPolicyRequest(AccessGroup accessGroup,String sv exportRule.setProtocols(List.of(ExportRule.ProtocolsEnum.any)); exportRule.setRoRule(List.of("any")); exportRule.setRwRule(List.of("any")); + exportRule.setSuperuser(List.of("any")); // Allow root/superuser access for NFS writes rules.add(exportRule); Svm svm = new Svm(); From e27fad26bac8286362cd6ee1dfd549173c58478f Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Mon, 17 Nov 2025 17:02:55 +0530 Subject: [PATCH 8/9] vm restart issue 1 --- .../cloudstack/storage/listener/OntapHostListener.java | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java index 9db7774bc21b..2fe8bf28fbe6 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java @@ -71,17 +71,13 @@ public boolean hostConnect(long hostId, long poolId) { logger.error("Failed to connect host - storage pool not found with id: {}", poolId); return false; } - // CRITICAL: Check if already connected to avoid infinite 
loops StoragePoolHostVO existingConnection = storagePoolHostDao.findByPoolHost(poolId, hostId); if (existingConnection != null && existingConnection.getLocalPath() != null && !existingConnection.getLocalPath().isEmpty()) { - logger.info("Host {} is already connected to storage pool {} at path {}. Skipping reconnection.", - host.getName(), pool.getName(), existingConnection.getLocalPath()); + logger.info("Host {} is already connected to storage pool {} at path {}. Skipping reconnection.", host.getName(), pool.getName(), existingConnection.getLocalPath()); return true; } - logger.info("Connecting host {} to ONTAP storage pool {}", host.getName(), pool.getName()); - try { // Create the ModifyStoragePoolCommand to send to the agent ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool); From e8062fdb87f04f248a6fbbb0549241214fcde4ec Mon Sep 17 00:00:00 2001 From: "Srivastava, Piyush" Date: Mon, 17 Nov 2025 19:13:10 +0530 Subject: [PATCH 9/9] vm restart issue 2 --- .../storage/lifecycle/OntapPrimaryDatastoreLifecycle.java | 4 ++-- .../cloudstack/storage/listener/OntapHostListener.java | 1 + .../cloudstack/storage/service/UnifiedNASStrategy.java | 8 ++++---- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index 64a39056f31f..676192d7ed96 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -291,7 +291,7 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); - throw new CloudRuntimeException("Failed to attach storage pool to cluster: " + e.getMessage(), e); + return false; } } _dataStoreHelper.attachCluster(dataStore); @@ -328,7 +328,7 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); - throw new CloudRuntimeException("Failed to attach storage pool to host: " + e.getMessage(), e); + return false; } } _dataStoreHelper.attachZone(dataStore); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java index 2fe8bf28fbe6..3b031cbb6a24 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java @@ -77,6 +77,7 @@ public boolean hostConnect(long hostId, long poolId) { logger.info("Host {} is already connected to storage pool {} at path {}. 
Skipping reconnection.", host.getName(), pool.getName(), existingConnection.getLocalPath()); return true; } + logger.info("Connecting host {} to ONTAP storage pool {}", host.getName(), pool.getName()); try { // Create the ModifyStoragePoolCommand to send to the agent diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java index 6044ca86230e..8410fd23534a 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java @@ -372,16 +372,16 @@ private ExportPolicy createExportPolicyRequest(AccessGroup accessGroup,String sv String ip = (hostStorageIp != null && !hostStorageIp.isEmpty()) ? hostStorageIp : host.getPrivateIpAddress(); - String ipToUse = ip + "/31"; // TODO since we have 2 IPs internal and external + String ipToUse = ip + "/32"; ExportRule.ExportClient exportClient = new ExportRule.ExportClient(); exportClient.setMatch(ipToUse); exportClients.add(exportClient); } exportRule.setClients(exportClients); exportRule.setProtocols(List.of(ExportRule.ProtocolsEnum.any)); - exportRule.setRoRule(List.of("any")); - exportRule.setRwRule(List.of("any")); - exportRule.setSuperuser(List.of("any")); // Allow root/superuser access for NFS writes + exportRule.setRoRule(List.of("sys")); // Use sys (Unix UID/GID) authentication for NFS + exportRule.setRwRule(List.of("sys")); // Use sys (Unix UID/GID) authentication for NFS + exportRule.setSuperuser(List.of("sys")); // Allow root/superuser access with sys auth rules.add(exportRule); Svm svm = new Svm();