diff --git a/plugins/storage/volume/ontap/pom.xml b/plugins/storage/volume/ontap/pom.xml
index 10ca7935f408..5cd012f82f43 100644
--- a/plugins/storage/volume/ontap/pom.xml
+++ b/plugins/storage/volume/ontap/pom.xml
@@ -24,14 +24,13 @@
org.apache.cloudstack
cloudstack-plugins
- 4.22.0.0-SNAPSHOT
+ 4.23.0.0-SNAPSHOT
../../../pom.xml
2021.0.7
11.0
20230227
- 2.15.2
4.5.14
1.6.2
3.8.1
@@ -77,7 +76,7 @@
com.fasterxml.jackson.core
jackson-databind
- ${jackson-databind.version}
+ 2.13.4
org.apache.httpcomponents
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java
index e2eb6220230a..c3aaae7efcc2 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java
@@ -27,6 +27,7 @@
import com.cloud.storage.Storage;
import com.cloud.storage.StoragePool;
import com.cloud.storage.Volume;
+import com.cloud.storage.VolumeVO;
import com.cloud.utils.Pair;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
@@ -64,13 +65,14 @@ public class OntapPrimaryDatastoreDriver implements PrimaryDataStoreDriver {
@Inject private StoragePoolDetailsDao storagePoolDetailsDao;
@Inject private PrimaryDataStoreDao storagePoolDao;
+ @Inject private com.cloud.storage.dao.VolumeDao volumeDao;
@Override
public Map getCapabilities() {
s_logger.trace("OntapPrimaryDatastoreDriver: getCapabilities: Called");
Map mapCapabilities = new HashMap<>();
-
- mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.TRUE.toString());
- mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString());
+ // RAW managed initial implementation: snapshot features not yet supported
+ mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.FALSE.toString());
+ mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.FALSE.toString());
return mapCapabilities;
}
@@ -116,25 +118,131 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet
createCmdResult = new CreateCmdResult(null, new Answer(null, false, errMsg));
createCmdResult.setResult(e.toString());
} finally {
+ if (createCmdResult != null && createCmdResult.isSuccess()) {
+ s_logger.info("createAsync: Volume metadata created successfully. Path: {}", path);
+ }
callback.complete(createCmdResult);
}
}
+ /**
+ * Creates CloudStack volume based on storage protocol type (NFS or iSCSI).
+ *
+ * For Managed NFS (Option 2 Implementation):
+ * - Returns only UUID without creating qcow2 file
+ * - KVM hypervisor creates qcow2 file automatically during VM deployment
+ * - ONTAP volume provides the backing NFS storage
+ *
+ * For iSCSI/Block Storage:
+ * - Creates LUN via ONTAP REST API
+ * - Returns LUN path for direct attachment
+ */
private String createCloudStackVolumeForTypeVolume(DataStore dataStore, DataObject dataObject) {
StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId());
if(storagePool == null) {
- s_logger.error("createCloudStackVolume : Storage Pool not found for id: " + dataStore.getId());
- throw new CloudRuntimeException("createCloudStackVolume : Storage Pool not found for id: " + dataStore.getId());
+ s_logger.error("createCloudStackVolumeForTypeVolume: Storage Pool not found for id: {}", dataStore.getId());
+ throw new CloudRuntimeException("createCloudStackVolumeForTypeVolume: Storage Pool not found for id: " + dataStore.getId());
}
+
Map details = storagePoolDetailsDao.listDetailsKeyPairs(dataStore.getId());
+ String protocol = details.get(Constants.PROTOCOL);
+
+ if (ProtocolType.NFS.name().equalsIgnoreCase(protocol)) {
+ return createManagedNfsVolume(dataStore, dataObject, storagePool);
+ } else if (ProtocolType.ISCSI.name().equalsIgnoreCase(protocol)) {
+ return createManagedBlockVolume(dataStore, dataObject, storagePool, details);
+ } else {
+ String errMsg = String.format("createCloudStackVolumeForTypeVolume: Unsupported protocol [%s]", protocol);
+ s_logger.error(errMsg);
+ throw new CloudRuntimeException(errMsg);
+ }
+ }
+
+ /**
+ * Creates Managed NFS Volume with ONTAP backing storage.
+ *
+ * Architecture: 1 CloudStack Storage Pool = 1 ONTAP Volume (shared by all volumes)
+ *
+ * Flow:
+ * 1. createAsync() stores volume metadata and NFS mount point
+ * 2. Volume attach triggers ManagedNfsStorageAdaptor.connectPhysicalDisk()
+ * 3. KVM mounts: nfs://nfsServer/junctionPath to /mnt/volumeUuid
+ * 4. Libvirt creates qcow2 file via storageVolCreateXML()
+ * 5. File created at: /vol/ontap_volume/volumeUuid (on ONTAP)
+ *
+ * Key Details:
+ * - All volumes in same pool share the same ONTAP volume NFS export
+     * - Each volume gets separate libvirt mount point: /mnt/<volumeUuid>
+     * - All qcow2 files stored in same ONTAP volume: /vol/<ontapVolumeName>/<volumeUuid>
+ * - volume._iScsiName stores the NFS junction path (pool.path)
+ *
+ * @param dataStore CloudStack data store (storage pool)
+ * @param dataObject Volume data object
+ * @param storagePool Storage pool VO
+ * @return Volume UUID (used as filename for qcow2 file)
+ */
+ private String createManagedNfsVolume(DataStore dataStore, DataObject dataObject, StoragePoolVO storagePool) {
+ VolumeInfo volumeInfo = (VolumeInfo) dataObject;
+ VolumeVO volume = volumeDao.findById(volumeInfo.getId());
+ String volumeUuid = volumeInfo.getUuid();
+
+ // Get the NFS junction path from storage pool
+ // This is the path that was set during pool creation (e.g., "/my_pool_volume")
+ String junctionPath = storagePool.getPath();
+
+ // Update volume metadata in CloudStack database
+ volume.setPoolType(Storage.StoragePoolType.ManagedNFS);
+ volume.setPoolId(dataStore.getId());
+ volume.setPath(volumeUuid); // Filename for qcow2 file
+
+ // CRITICAL: Store junction path in _iScsiName field
+ // CloudStack will use this in AttachCommand as DiskTO.MOUNT_POINT
+ // ManagedNfsStorageAdaptor will mount: nfs://hostAddress/junctionPath to /mnt/volumeUuid
+ volume.set_iScsiName(junctionPath);
+
+ volumeDao.update(volume.getId(), volume);
+
+ s_logger.info("ONTAP Managed NFS Volume Created: uuid={}, path={}, junctionPath={}, format=QCOW2, " +
+ "pool={}, size={}GB. Libvirt will create qcow2 file at mount time.",
+ volumeUuid, volumeUuid, junctionPath, storagePool.getName(),
+ volumeInfo.getSize() / (1024 * 1024 * 1024));
+
+ // Optional: Prepare ONTAP volume for optimal qcow2 storage (future enhancement)
+ // prepareOntapVolumeForQcow2Storage(dataStore, volumeInfo);
+
+ return volumeUuid;
+ }
+
+ /**
+ * Creates iSCSI/Block volume by calling ONTAP REST API to create a LUN.
+ *
+ * For block storage (iSCSI), the storage provider must create the LUN
+ * before CloudStack can use it. This is different from NFS where the
+ * hypervisor creates the file.
+ *
+ * @param dataStore CloudStack data store
+ * @param dataObject Volume data object
+ * @param storagePool Storage pool VO
+ * @param details Storage pool details containing ONTAP connection info
+ * @return LUN path/name for iSCSI attachment
+ */
+ private String createManagedBlockVolume(DataStore dataStore, DataObject dataObject,
+ StoragePoolVO storagePool, Map details) {
StorageStrategy storageStrategy = getStrategyByStoragePoolDetails(details);
- s_logger.info("createCloudStackVolumeForTypeVolume: Connection to Ontap SVM [{}] successful, preparing CloudStackVolumeRequest", details.get(Constants.SVM_NAME));
- CloudStackVolume cloudStackVolumeRequest = Utility.createCloudStackVolumeRequestByProtocol(storagePool, details, dataObject);
+
+ s_logger.info("createManagedBlockVolume: Creating iSCSI LUN on ONTAP SVM [{}]", details.get(Constants.SVM_NAME));
+
+ CloudStackVolume cloudStackVolumeRequest = Utility.createCloudStackVolumeRequestByProtocol(storagePool, details, (VolumeInfo) dataObject);
+
CloudStackVolume cloudStackVolume = storageStrategy.createCloudStackVolume(cloudStackVolumeRequest);
- if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL)) && cloudStackVolume.getLun() != null && cloudStackVolume.getLun().getName() != null) {
- return cloudStackVolume.getLun().getName();
+
+ if (cloudStackVolume.getLun() != null && cloudStackVolume.getLun().getName() != null) {
+ String lunPath = cloudStackVolume.getLun().getName();
+ s_logger.info("createManagedBlockVolume: iSCSI LUN created successfully: {}", lunPath);
+ return lunPath;
} else {
- String errMsg = "createCloudStackVolumeForTypeVolume: Volume creation failed. Lun or Lun Path is null for dataObject: " + dataObject;
+ String errMsg = String.format("createManagedBlockVolume: LUN creation failed for volume [%s]. " +
+ "LUN or LUN path is null.", dataObject.getUuid());
s_logger.error(errMsg);
throw new CloudRuntimeException(errMsg);
}
@@ -142,7 +250,24 @@ private String createCloudStackVolumeForTypeVolume(DataStore dataStore, DataObje
@Override
public void deleteAsync(DataStore store, DataObject data, AsyncCompletionCallback callback) {
-
+ CommandResult commandResult = new CommandResult();
+ try {
+ if (store == null || data == null) {
+ throw new CloudRuntimeException("deleteAsync: store or data is null");
+ }
+ if (data.getType() == DataObjectType.VOLUME) {
+ StoragePoolVO storagePool = storagePoolDao.findById(store.getId());
+ Map details = storagePoolDetailsDao.listDetailsKeyPairs(store.getId());
+ if (ProtocolType.NFS.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) {
+ // ManagedNFS qcow2 backing file deletion handled by KVM host/libvirt; nothing to do via ONTAP REST.
+ s_logger.info("deleteAsync: ManagedNFS volume {} no-op ONTAP deletion", data.getId());
+ }
+ }
+ } catch (Exception e) {
+ commandResult.setResult(e.getMessage());
+ } finally {
+ callback.complete(commandResult);
+ }
}
@Override
@@ -177,7 +302,6 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore
@Override
public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) {
-
}
@Override
@@ -217,7 +341,7 @@ public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, Qual
@Override
public boolean canProvideStorageStats() {
- return true;
+ return false;
}
@Override
@@ -227,7 +351,7 @@ public Pair getStorageStats(StoragePool storagePool) {
@Override
public boolean canProvideVolumeStats() {
- return true;
+ return false; // Not yet implemented for RAW managed NFS
}
@Override
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java
index ce2783add228..e9c504e8de71 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java
@@ -1,3 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
package org.apache.cloudstack.storage.feign;
import feign.RequestInterceptor;
@@ -11,7 +30,7 @@
import feign.codec.EncodeException;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.DeserializationFeature;
-import com.fasterxml.jackson.databind.json.JsonMapper;
+import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.http.conn.ConnectionKeepAliveStrategy;
import org.apache.http.conn.ssl.NoopHostnameVerifier;
import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
@@ -36,13 +55,11 @@ public class FeignConfiguration {
private final int retryMaxInterval = 5;
private final String ontapFeignMaxConnection = "80";
private final String ontapFeignMaxConnectionPerRoute = "20";
- private final JsonMapper jsonMapper;
+ private final ObjectMapper jsonMapper;
public FeignConfiguration() {
- this.jsonMapper = JsonMapper.builder()
- .disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES)
- .findAndAddModules()
- .build();
+ this.jsonMapper = new ObjectMapper();
+ this.jsonMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
}
public Client createClient() {
@@ -120,16 +137,43 @@ public Decoder createDecoder() {
@Override
public Object decode(Response response, Type type) throws IOException, DecodeException {
if (response.body() == null) {
+ logger.debug("Response body is null, returning null");
return null;
}
String json = null;
try (InputStream bodyStream = response.body().asInputStream()) {
json = new String(bodyStream.readAllBytes(), StandardCharsets.UTF_8);
logger.debug("Decoding JSON response: {}", json);
- return jsonMapper.readValue(json, jsonMapper.getTypeFactory().constructType(type));
+ logger.debug("Target type: {}", type);
+ logger.debug("About to call jsonMapper.readValue()...");
+
+ Object result = null;
+ try {
+ logger.debug("Calling jsonMapper.constructType()...");
+ var javaType = jsonMapper.getTypeFactory().constructType(type);
+ logger.debug("constructType() returned: {}", javaType);
+
+ logger.debug("Calling jsonMapper.readValue() with json and javaType...");
+ result = jsonMapper.readValue(json, javaType);
+ logger.debug("jsonMapper.readValue() completed successfully");
+ } catch (Throwable ex) {
+ logger.error("EXCEPTION in jsonMapper.readValue()! Type: {}, Message: {}", ex.getClass().getName(), ex.getMessage(), ex);
+ throw ex;
+ }
+
+ if (result == null) {
+ logger.warn("Decoded result is null!");
+ } else {
+ logger.debug("Successfully decoded to object of type: {}", result.getClass().getName());
+ }
+ logger.debug("Returning result from decoder");
+ return result;
} catch (IOException e) {
- logger.error("Error decoding JSON response. Status: {}, Raw body: {}", response.status(), json, e);
+ logger.error("IOException during decoding. Status: {}, Raw body: {}", response.status(), json, e);
throw new DecodeException(response.status(), "Error decoding JSON response", response.request(), e);
+ } catch (Exception e) {
+ logger.error("Unexpected error during decoding. Status: {}, Type: {}, Raw body: {}", response.status(), type, json, e);
+ throw new DecodeException(response.status(), "Unexpected error during decoding", response.request(), e);
}
}
};
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java
index b7aac9954cfe..f48f83dc28de 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java
@@ -19,6 +19,7 @@
package org.apache.cloudstack.storage.feign.client;
+import feign.QueryMap;
import org.apache.cloudstack.storage.feign.model.ExportPolicy;
import org.apache.cloudstack.storage.feign.model.FileInfo;
import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
@@ -26,58 +27,60 @@
import feign.Param;
import feign.RequestLine;
-//TODO: Proper URLs should be added in the RequestLine annotations below
+import java.util.Map;
+
public interface NASFeignClient {
// File Operations
- @RequestLine("GET /{volumeUuid}/files/{path}")
+ @RequestLine("GET /api/storage/volumes/{volumeUuid}/files/{path}")
@Headers({"Authorization: {authHeader}"})
OntapResponse getFileResponse(@Param("authHeader") String authHeader,
- @Param("volumeUuid") String volumeUUID,
- @Param("path") String filePath);
+ @Param("volumeUuid") String volumeUUID,
+ @Param("path") String filePath);
- @RequestLine("DELETE /{volumeUuid}/files/{path}")
+ @RequestLine("DELETE /api/storage/volumes/{volumeUuid}/files/{path}")
@Headers({"Authorization: {authHeader}"})
void deleteFile(@Param("authHeader") String authHeader,
- @Param("volumeUuid") String volumeUUID,
- @Param("path") String filePath);
+ @Param("volumeUuid") String volumeUUID,
+ @Param("path") String filePath);
- @RequestLine("PATCH /{volumeUuid}/files/{path}")
+ @RequestLine("PATCH /api/storage/volumes/{volumeUuid}/files/{path}")
@Headers({"Authorization: {authHeader}"})
void updateFile(@Param("authHeader") String authHeader,
- @Param("volumeUuid") String volumeUUID,
- @Param("path") String filePath, FileInfo fileInfo);
+ @Param("volumeUuid") String volumeUUID,
+ @Param("path") String filePath,
+ FileInfo fileInfo);
- @RequestLine("POST /{volumeUuid}/files/{path}")
+ @RequestLine("POST /api/storage/volumes/{volumeUuid}/files/{path}")
@Headers({"Authorization: {authHeader}"})
void createFile(@Param("authHeader") String authHeader,
- @Param("volumeUuid") String volumeUUID,
- @Param("path") String filePath, FileInfo file);
+ @Param("volumeUuid") String volumeUUID,
+ @Param("path") String filePath,
+ FileInfo file);
// Export Policy Operations
- @RequestLine("POST /")
- @Headers({"Authorization: {authHeader}", "return_records: {returnRecords}"})
- ExportPolicy createExportPolicy(@Param("authHeader") String authHeader,
- @Param("returnRecords") boolean returnRecords,
+ @RequestLine("POST /api/protocols/nfs/export-policies")
+ @Headers({"Authorization: {authHeader}"})
+ void createExportPolicy(@Param("authHeader") String authHeader,
ExportPolicy exportPolicy);
- @RequestLine("GET /")
+ @RequestLine("GET /api/protocols/nfs/export-policies")
@Headers({"Authorization: {authHeader}"})
- OntapResponse getExportPolicyResponse(@Param("authHeader") String authHeader);
+ OntapResponse getExportPolicyResponse(@Param("authHeader") String authHeader, @QueryMap Map queryMap);
- @RequestLine("GET /{id}")
+ @RequestLine("GET /api/protocols/nfs/export-policies/{id}")
@Headers({"Authorization: {authHeader}"})
- OntapResponse getExportPolicyById(@Param("authHeader") String authHeader,
- @Param("id") String id);
+ ExportPolicy getExportPolicyById(@Param("authHeader") String authHeader,
+ @Param("id") String id);
- @RequestLine("DELETE /{id}")
+ @RequestLine("DELETE /api/protocols/nfs/export-policies/{id}")
@Headers({"Authorization: {authHeader}"})
void deleteExportPolicyById(@Param("authHeader") String authHeader,
- @Param("id") String id);
+ @Param("id") String id);
- @RequestLine("PATCH /{id}")
+ @RequestLine("PATCH /api/protocols/nfs/export-policies/{id}")
@Headers({"Authorization: {authHeader}"})
OntapResponse updateExportPolicy(@Param("authHeader") String authHeader,
- @Param("id") String id,
- ExportPolicy request);
+ @Param("id") String id,
+ ExportPolicy request);
}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java
index 9a2c76639221..4d946adbb124 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java
@@ -18,11 +18,15 @@
*/
package org.apache.cloudstack.storage.feign.client;
+import feign.QueryMap;
import org.apache.cloudstack.storage.feign.model.Volume;
import org.apache.cloudstack.storage.feign.model.response.JobResponse;
import feign.Headers;
import feign.Param;
import feign.RequestLine;
+import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
+
+import java.util.Map;
public interface VolumeFeignClient {
@@ -38,8 +42,11 @@ public interface VolumeFeignClient {
@Headers({"Authorization: {authHeader}"})
Volume getVolumeByUUID(@Param("authHeader") String authHeader, @Param("uuid") String uuid);
+ @RequestLine("GET /api/storage/volumes")
+ @Headers({"Authorization: {authHeader}"})
+ OntapResponse getVolume(@Param("authHeader") String authHeader, @QueryMap Map queryMap);
+
@RequestLine("PATCH /api/storage/volumes/{uuid}")
- @Headers({"Accept: {acceptHeader}", "Authorization: {authHeader}"})
- JobResponse updateVolumeRebalancing(@Param("acceptHeader") String acceptHeader, @Param("uuid") String uuid, Volume volumeRequest);
+ @Headers({ "Authorization: {authHeader}"})
+ JobResponse updateVolumeRebalancing(@Param("authHeader") String authHeader, @Param("uuid") String uuid, Volume volumeRequest);
}
-
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportRule.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportRule.java
index 8f3c9597dca7..788fc8b5544d 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportRule.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportRule.java
@@ -76,6 +76,16 @@ public static ProtocolsEnum fromValue(String text) {
@JsonProperty("protocols")
private List protocols = null;
+ @JsonProperty("ro_rule")
+ private List roRule = null;
+
+ @JsonProperty("rw_rule")
+ private List rwRule = null;
+
+ @JsonProperty("superuser")
+ private List superuser = null;
+
+
public ExportRule anonymousUser(String anonymousUser) {
this.anonymousUser = anonymousUser;
return this;
@@ -140,6 +150,30 @@ public void setMatch (String match) {
}
}
+ public List getRwRule() {
+ return rwRule;
+ }
+
+ public void setRwRule(List rwRule) {
+ this.rwRule = rwRule;
+ }
+
+ public List getRoRule() {
+ return roRule;
+ }
+
+ public void setRoRule(List roRule) {
+ this.roRule = roRule;
+ }
+
+ public List getSuperuser() {
+ return superuser;
+ }
+
+ public void setSuperuser(List superuser) {
+ this.superuser = superuser;
+ }
+
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Svm.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Svm.java
index f1a226739365..65821739f1b2 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Svm.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Svm.java
@@ -144,4 +144,4 @@ public int hashCode() {
@JsonInclude(JsonInclude.Include.NON_NULL)
public static class Links { }
-}
\ No newline at end of file
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java
old mode 100644
new mode 100755
index 01b013f606dd..b567cfa00e6f
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java
@@ -38,12 +38,17 @@
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.lifecycle.BasePrimaryDataStoreLifeCycleImpl;
+import org.apache.cloudstack.storage.feign.model.ExportPolicy;
import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.feign.model.Volume;
import org.apache.cloudstack.storage.provider.StorageProviderFactory;
import org.apache.cloudstack.storage.service.StorageStrategy;
+import org.apache.cloudstack.storage.service.model.AccessGroup;
import org.apache.cloudstack.storage.service.model.ProtocolType;
import org.apache.cloudstack.storage.utils.Constants;
+import org.apache.cloudstack.storage.utils.Utility;
import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -59,6 +64,7 @@ public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycl
@Inject private StorageManager _storageMgr;
@Inject private ResourceManager _resourceMgr;
@Inject private PrimaryDataStoreHelper _dataStoreHelper;
+ @Inject private StoragePoolDetailsDao storagePoolDetailsDao;
private static final Logger s_logger = LogManager.getLogger(OntapPrimaryDatastoreLifecycle.class);
// ONTAP minimum volume size is 1.56 GB (1677721600 bytes)
@@ -182,17 +188,22 @@ public DataStore initialize(Map dsInfos) {
// Determine storage pool type and path based on protocol
String path;
+ String host = "";
ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL));
switch (protocol) {
- case NFS3:
- parameters.setType(Storage.StoragePoolType.NetworkFilesystem);
- path = details.get(Constants.MANAGEMENT_LIF) + ":/" + storagePoolName;
+ case NFS:
+ parameters.setType(Storage.StoragePoolType.ManagedNFS);
+ // Path should be just the NFS export path (junction path), NOT host:path
+ // CloudStack will construct the full mount path as: hostAddress + ":" + path
+ path = "/" + storagePoolName;
s_logger.info("Setting NFS path for storage pool: " + path);
+            host = "10.193.192.136"; // TODO(review): hardcoded NFS data LIF address — must be derived from storage pool details (e.g. an NFS LIF key alongside Constants.MANAGEMENT_LIF) before merge
break;
case ISCSI:
parameters.setType(Storage.StoragePoolType.Iscsi);
path = "iqn.1992-08.com.netapp:" + details.get(Constants.SVM_NAME) + "." + storagePoolName;
s_logger.info("Setting iSCSI path for storage pool: " + path);
+ parameters.setHost(details.get(Constants.MANAGEMENT_LIF));
break;
default:
throw new CloudRuntimeException("Unsupported protocol: " + protocol + ", cannot create primary storage");
@@ -213,14 +224,28 @@ public DataStore initialize(Map dsInfos) {
long volumeSize = Long.parseLong(details.get(Constants.SIZE));
s_logger.info("Creating ONTAP volume '" + storagePoolName + "' with size: " + volumeSize + " bytes (" +
(volumeSize / (1024 * 1024 * 1024)) + " GB)");
- storageStrategy.createStorageVolume(storagePoolName, volumeSize);
+ try {
+ Volume volume = storageStrategy.createStorageVolume(storagePoolName, volumeSize);
+ if (volume == null) {
+ s_logger.error("createStorageVolume returned null for volume: " + storagePoolName);
+ throw new CloudRuntimeException("Failed to create ONTAP volume: " + storagePoolName);
+ }
+
+ s_logger.info("Volume object retrieved successfully. UUID: " + volume.getUuid() + ", Name: " + volume.getName());
+
+ details.putIfAbsent(Constants.VOLUME_UUID, volume.getUuid());
+ details.putIfAbsent(Constants.VOLUME_NAME, volume.getName());
+ } catch (Exception e) {
+ s_logger.error("Exception occurred while creating ONTAP volume: " + storagePoolName, e);
+ throw new CloudRuntimeException("Failed to create ONTAP volume: " + storagePoolName + ". Error: " + e.getMessage(), e);
+ }
} else {
throw new CloudRuntimeException("ONTAP details validation failed, cannot create primary storage");
}
// Set parameters for primary data store
- parameters.setHost(details.get(Constants.MANAGEMENT_LIF));
parameters.setPort(Constants.ONTAP_PORT);
+ parameters.setHost(host);
parameters.setPath(path);
parameters.setTags(tags != null ? tags : "");
parameters.setIsTagARule(isTagARule != null ? isTagARule : Boolean.FALSE);
@@ -241,16 +266,32 @@ public DataStore initialize(Map dsInfos) {
@Override
public boolean attachCluster(DataStore dataStore, ClusterScope scope) {
logger.debug("In attachCluster for ONTAP primary storage");
- PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo)dataStore;
- List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primarystore);
+ PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)dataStore;
+ List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primaryStore);
+
+ logger.debug(" datastore object received is {} ",primaryStore );
- logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primarystore.getClusterId()));
+ logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primaryStore.getClusterId()));
+
+ Map details = storagePoolDetailsDao.listDetailsKeyPairs(primaryStore.getId());
+ StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details);
+ ExportPolicy exportPolicy = new ExportPolicy();
+ AccessGroup accessGroupRequest = new AccessGroup();
+ accessGroupRequest.setHostsToConnect(hostsToConnect);
+ accessGroupRequest.setScope(scope);
+ primaryStore.setDetails(details);// setting details as it does not come from cloudstack
+ accessGroupRequest.setPrimaryDataStoreInfo(primaryStore);
+ accessGroupRequest.setPolicy(exportPolicy);
+ strategy.createAccessGroup(accessGroupRequest);
+
+ logger.debug("attachCluster: Attaching the pool to each of the host in the cluster: {}", primaryStore.getClusterId());
for (HostVO host : hostsToConnect) {
// TODO: Fetch the host IQN and add to the initiator group on ONTAP cluster
try {
_storageMgr.connectHostToSharedPool(host, dataStore.getId());
} catch (Exception e) {
logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e);
+ return false;
}
}
_dataStoreHelper.attachCluster(dataStore);
@@ -265,15 +306,29 @@ public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo exis
@Override
public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.HypervisorType hypervisorType) {
logger.debug("In attachZone for ONTAP primary storage");
- List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), Hypervisor.HypervisorType.KVM);
+ PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)dataStore;
+ List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), Hypervisor.HypervisorType.KVM);
logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", hostsToConnect));
+
+ Map details = storagePoolDetailsDao.listDetailsKeyPairs(primaryStore.getId());
+ StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details);
+ ExportPolicy exportPolicy = new ExportPolicy();
+ AccessGroup accessGroupRequest = new AccessGroup();
+ accessGroupRequest.setHostsToConnect(hostsToConnect);
+ accessGroupRequest.setScope(scope);
+ primaryStore.setDetails(details); // setting details as it does not come from cloudstack
+ accessGroupRequest.setPrimaryDataStoreInfo(primaryStore);
+ accessGroupRequest.setPolicy(exportPolicy);
+ strategy.createAccessGroup(accessGroupRequest);
+
for (HostVO host : hostsToConnect) {
// TODO: Fetch the host IQN and add to the initiator group on ONTAP cluster
try {
_storageMgr.connectHostToSharedPool(host, dataStore.getId());
} catch (Exception e) {
logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e);
+ return false;
}
}
_dataStoreHelper.attachZone(dataStore);
@@ -325,4 +380,3 @@ public void changeStoragePoolScopeToCluster(DataStore store, ClusterScope cluste
}
}
-
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java
new file mode 100644
index 000000000000..5b39e2ce5293
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java
@@ -0,0 +1,193 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.cloudstack.storage.listener;
+
+import javax.inject.Inject;
+
+import com.cloud.agent.api.ModifyStoragePoolCommand;
+import com.cloud.agent.api.ModifyStoragePoolAnswer;
+import com.cloud.agent.api.StoragePoolInfo;
+import com.cloud.alert.AlertManager;
+import com.cloud.storage.StoragePoolHostVO;
+import com.cloud.storage.dao.StoragePoolHostDao;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
+import com.cloud.agent.AgentManager;
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.DeleteStoragePoolCommand;
+import com.cloud.host.Host;
+import com.cloud.storage.StoragePool;
+import com.cloud.utils.exception.CloudRuntimeException;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
+import com.cloud.host.dao.HostDao;
+
+/**
+ * HypervisorHostListener implementation for ONTAP storage.
+ * Handles connecting/disconnecting hosts to/from ONTAP-backed storage pools.
+ */
+public class OntapHostListener implements HypervisorHostListener {
+ protected Logger logger = LogManager.getLogger(getClass());
+
+ @Inject
+ private AgentManager _agentMgr;
+ @Inject
+ private AlertManager _alertMgr;
+ @Inject
+ private PrimaryDataStoreDao _storagePoolDao;
+ @Inject
+ private HostDao _hostDao;
+ @Inject private StoragePoolHostDao storagePoolHostDao;
+
+
+ /**
+  * Connects a host to an ONTAP-backed storage pool: validates host and pool,
+  * sends a ModifyStoragePoolCommand to the host agent, records the resulting
+  * mount path in storage_pool_host_ref, and refreshes pool capacity figures.
+  *
+  * @param hostId id of the host to connect
+  * @param poolId id of the storage pool to attach
+  * @return true on success; false on any failure (exceptions are deliberately
+  *         swallowed — see the comment in the catch block)
+  */
+ @Override
+ public boolean hostConnect(long hostId, long poolId) {
+ logger.info("Connect to host " + hostId + " from pool " + poolId);
+ Host host = _hostDao.findById(hostId);
+ if (host == null) {
+ logger.error("Failed to add host by HostListener as host was not found with id : {}", hostId);
+ return false;
+ }
+
+ // TODO add host type check also since we support only KVM for now, host.getHypervisorType().equals(HypervisorType.KVM)
+ StoragePool pool = _storagePoolDao.findById(poolId);
+ if (pool == null) {
+ logger.error("Failed to connect host - storage pool not found with id: {}", poolId);
+ return false;
+ }
+ logger.info("Connecting host {} to ONTAP storage pool {}", host.getName(), pool.getName());
+ try {
+ // Create the ModifyStoragePoolCommand to send to the agent
+ // Note: Always send command even if database entry exists, because agent may have restarted
+ // and lost in-memory pool registration. The command handler is idempotent.
+ ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool);
+
+ Answer answer = _agentMgr.easySend(hostId, cmd);
+
+ if (answer == null) {
+ throw new CloudRuntimeException(String.format("Unable to get an answer to the modify storage pool command (%s)", pool));
+ }
+
+ if (!answer.getResult()) {
+ String msg = String.format("Unable to attach storage pool %s to host %d", pool, hostId);
+
+ // Raise an operator alert before failing — attach failures need attention.
+ _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, pool.getDataCenterId(), pool.getPodId(), msg, msg);
+
+ throw new CloudRuntimeException(String.format(
+ "Unable to establish a connection from agent to storage pool %s due to %s", pool, answer.getDetails()));
+ }
+
+ // Get the mount path from the answer
+ ModifyStoragePoolAnswer mspAnswer = (ModifyStoragePoolAnswer) answer;
+ StoragePoolInfo poolInfo = mspAnswer.getPoolInfo();
+ if (poolInfo == null) {
+ throw new CloudRuntimeException("ModifyStoragePoolAnswer returned null poolInfo");
+ }
+
+ String localPath = poolInfo.getLocalPath();
+ logger.info("Storage pool {} successfully mounted at: {}", pool.getName(), localPath);
+
+ // Update or create the storage_pool_host_ref entry with the correct local_path
+ StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(poolId, hostId);
+
+ if (storagePoolHost == null) {
+ storagePoolHost = new StoragePoolHostVO(poolId, hostId, localPath);
+ storagePoolHostDao.persist(storagePoolHost);
+ logger.info("Created storage_pool_host_ref entry for pool {} and host {}", pool.getName(), host.getName());
+ } else {
+ storagePoolHost.setLocalPath(localPath);
+ storagePoolHostDao.update(storagePoolHost.getId(), storagePoolHost);
+ logger.info("Updated storage_pool_host_ref entry with local_path: {}", localPath);
+ }
+
+ // Update pool capacity/usage information
+ // (used = capacity - available, as reported by the agent)
+ StoragePoolVO poolVO = _storagePoolDao.findById(poolId);
+ if (poolVO != null && poolInfo.getCapacityBytes() > 0) {
+ poolVO.setCapacityBytes(poolInfo.getCapacityBytes());
+ poolVO.setUsedBytes(poolInfo.getCapacityBytes() - poolInfo.getAvailableBytes());
+ _storagePoolDao.update(poolVO.getId(), poolVO);
+ logger.info("Updated storage pool capacity: {} GB, used: {} GB", poolInfo.getCapacityBytes() / (1024 * 1024 * 1024), (poolInfo.getCapacityBytes() - poolInfo.getAvailableBytes()) / (1024 * 1024 * 1024));
+ }
+
+ } catch (Exception e) {
+ logger.error("Exception while connecting host {} to storage pool {}", host.getName(), pool.getName(), e);
+ // CRITICAL: Don't throw exception - it crashes the agent and causes restart loops
+ // Return false to indicate failure without crashing
+ return false;
+ }
+ return true;
+ }
+
+ @Override
+ public boolean hostDisconnected(Host host, StoragePool pool) {
+ logger.info("Disconnect from host " + host.getId() + " from pool " + pool.getName());
+
+ Host hostToremove = _hostDao.findById(host.getId());
+ if (hostToremove == null) {
+ logger.error("Failed to add host by HostListener as host was not found with id : {}", host.getId());
+ return false;
+ }
+ // TODO add storage pool get validation
+ logger.info("Disconnecting host {} from ONTAP storage pool {}", host.getName(), pool.getName());
+
+ try {
+ DeleteStoragePoolCommand cmd = new DeleteStoragePoolCommand(pool);
+ long hostId = host.getId();
+ Answer answer = _agentMgr.easySend(hostId, cmd);
+
+ if (answer != null && answer.getResult()) {
+ logger.info("Successfully disconnected host {} from ONTAP storage pool {}", host.getName(), pool.getName());
+ return true;
+ } else {
+ String errMsg = (answer != null) ? answer.getDetails() : "Unknown error";
+ logger.warn("Failed to disconnect host {} from storage pool {}. Error: {}", host.getName(), pool.getName(), errMsg);
+ return false;
+ }
+ } catch (Exception e) {
+ logger.error("Exception while disconnecting host {} from storage pool {}", host.getName(), pool.getName(), e);
+ return false;
+ }
+ }
+
+ @Override
+ public boolean hostDisconnected(long hostId, long poolId) {
+ // NOTE(review): no-op stub that always reports failure. The
+ // hostDisconnected(Host, StoragePool) overload above carries the real
+ // disconnect logic — confirm this id-based overload is not the one the
+ // framework invokes before leaving it unimplemented.
+ return false;
+ }
+
+ @Override
+ public boolean hostAboutToBeRemoved(long hostId) {
+ // No ONTAP-side cleanup implemented yet for host removal.
+ return false;
+ }
+
+ @Override
+ public boolean hostRemoved(long hostId, long clusterId) {
+ // No ONTAP-side cleanup implemented yet for host removal.
+ return false;
+ }
+
+ @Override
+ public boolean hostEnabled(long hostId) {
+ // NOTE(review): returns false for an event with no ONTAP action needed —
+ // verify the framework does not interpret false as a failure here.
+ return false;
+ }
+
+ @Override
+ public boolean hostAdded(long hostId) {
+ // NOTE(review): returns false for an event with no ONTAP action needed —
+ // verify the framework does not interpret false as a failure here.
+ return false;
+ }
+
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java
index fa2f14692c77..4079792f87d8 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java
@@ -27,6 +27,7 @@
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
import org.apache.cloudstack.storage.driver.OntapPrimaryDatastoreDriver;
import org.apache.cloudstack.storage.lifecycle.OntapPrimaryDatastoreLifecycle;
+import org.apache.cloudstack.storage.listener.OntapHostListener;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.springframework.stereotype.Component;
@@ -41,6 +42,7 @@ public class OntapPrimaryDatastoreProvider implements PrimaryDataStoreProvider {
private static final Logger s_logger = LogManager.getLogger(OntapPrimaryDatastoreProvider.class);
private OntapPrimaryDatastoreDriver primaryDatastoreDriver;
private OntapPrimaryDatastoreLifecycle primaryDatastoreLifecycle;
+ private HypervisorHostListener listener;
public OntapPrimaryDatastoreProvider() {
s_logger.info("OntapPrimaryDatastoreProvider initialized");
@@ -57,7 +59,7 @@ public DataStoreDriver getDataStoreDriver() {
@Override
public HypervisorHostListener getHostListener() {
- return null;
+ return listener;
}
@Override
@@ -71,6 +73,8 @@ public boolean configure(Map params) {
s_logger.trace("OntapPrimaryDatastoreProvider: configure: Called");
primaryDatastoreDriver = ComponentContext.inject(OntapPrimaryDatastoreDriver.class);
primaryDatastoreLifecycle = ComponentContext.inject(OntapPrimaryDatastoreLifecycle.class);
+ listener = ComponentContext.inject(OntapHostListener.class);
+
return true;
}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java
index 6bb6ad1fef73..e9448ec16ded 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java
@@ -36,7 +36,7 @@ public static StorageStrategy getStrategy(OntapStorage ontapStorage) {
ProtocolType protocol = ontapStorage.getProtocol();
s_logger.info("Initializing StorageProviderFactory with protocol: " + protocol);
switch (protocol) {
- case NFS3:
+ case NFS:
if (!ontapStorage.getIsDisaggregated()) {
UnifiedNASStrategy unifiedNASStrategy = new UnifiedNASStrategy(ontapStorage);
unifiedNASStrategy.setOntapStorage(ontapStorage);
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java
index 0f9706335784..b11c60e63385 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java
@@ -27,6 +27,7 @@
import org.apache.cloudstack.storage.feign.client.VolumeFeignClient;
import org.apache.cloudstack.storage.feign.model.Aggregate;
import org.apache.cloudstack.storage.feign.model.Job;
+import org.apache.cloudstack.storage.feign.model.Nas;
import org.apache.cloudstack.storage.feign.model.OntapStorage;
import org.apache.cloudstack.storage.feign.model.Svm;
import org.apache.cloudstack.storage.feign.model.Volume;
@@ -150,14 +151,22 @@ public Volume createStorageVolume(String volumeName, Long size) {
Svm svm = new Svm();
svm.setName(svmName);
+ Nas nas = new Nas();
+ nas.setPath("/" + volumeName);
+
volumeRequest.setName(volumeName);
volumeRequest.setSvm(svm);
volumeRequest.setAggregates(aggregates);
volumeRequest.setSize(size);
+ volumeRequest.setNas(nas); // by default, if we don't set a path, ONTAP creates a volume with a mount/junction path // TODO check whether we need to append the SVM name
+ // since storage pool names cannot be duplicated, the junction path /volumeName will always be unique
// Make the POST API call to create the volume
try {
- // Create URI for POST CreateVolume API
- // Call the VolumeFeignClient to create the volume
+ /*
+ ONTAP creates a default rule of 0.0.0.0 if no export rules are defined while creating the volume,
+ and since CloudStack is not aware of the hosts at storage pool creation time, we can either create a
+ default or permissive rule and update it later as part of the attachCluster or attachZone implementation
+ */
JobResponse jobResponse = volumeFeignClient.createVolumeWithJob(authHeader, volumeRequest);
if (jobResponse == null || jobResponse.getJob() == null) {
throw new CloudRuntimeException("Failed to initiate volume creation for " + volumeName);
@@ -192,8 +201,38 @@ public Volume createStorageVolume(String volumeName, Long size) {
throw new CloudRuntimeException("Failed to create volume: " + e.getMessage());
}
s_logger.info("Volume created successfully: " + volumeName);
- //TODO
- return null;
+ // Below code is to update volume uuid to storage pool mapping once and used for all other workflow saving get volume call
+ try {
+ Map queryParams = Map.of(Constants.NAME, volumeName);
+ s_logger.debug("Fetching volume details for: " + volumeName);
+
+ OntapResponse ontapVolume = volumeFeignClient.getVolume(authHeader, queryParams);
+ s_logger.debug("Feign call completed. Processing response...");
+
+ if (ontapVolume == null) {
+ s_logger.error("OntapResponse is null for volume: " + volumeName);
+ throw new CloudRuntimeException("Failed to fetch volume " + volumeName + ": Response is null");
+ }
+ s_logger.debug("OntapResponse is not null. Checking records field...");
+
+ if (ontapVolume.getRecords() == null) {
+ s_logger.error("OntapResponse.records is null for volume: " + volumeName);
+ throw new CloudRuntimeException("Failed to fetch volume " + volumeName + ": Records list is null");
+ }
+ s_logger.debug("Records field is not null. Size: " + ontapVolume.getRecords().size());
+
+ if (ontapVolume.getRecords().isEmpty()) {
+ s_logger.error("OntapResponse.records is empty for volume: " + volumeName);
+ throw new CloudRuntimeException("Failed to fetch volume " + volumeName + ": No records found");
+ }
+
+ Volume volume = ontapVolume.getRecords().get(0);
+ s_logger.info("Volume retrieved successfully: " + volumeName + ", UUID: " + volume.getUuid());
+ return volume;
+ } catch (Exception e) {
+ s_logger.error("Exception while retrieving volume details for: " + volumeName, e);
+ throw new CloudRuntimeException("Failed to fetch volume: " + volumeName + ". Error: " + e.getMessage(), e);
+ }
}
/**
@@ -287,7 +326,7 @@ public Volume getStorageVolume(Volume volume)
* @param accessGroup the access group to create
* @return the created AccessGroup object
*/
- abstract AccessGroup createAccessGroup(AccessGroup accessGroup);
+ abstract public AccessGroup createAccessGroup(AccessGroup accessGroup);
/**
* Method encapsulates the behavior based on the opted protocol in subclasses
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java
index cb3079691c94..de5dd9ffbe34 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java
@@ -19,30 +19,55 @@
package org.apache.cloudstack.storage.service;
+import com.cloud.host.HostVO;
+import com.cloud.utils.exception.CloudRuntimeException;
+import feign.FeignException;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.feign.FeignClientFactory;
+import org.apache.cloudstack.storage.feign.client.JobFeignClient;
import org.apache.cloudstack.storage.feign.client.NASFeignClient;
+import org.apache.cloudstack.storage.feign.client.VolumeFeignClient;
+import org.apache.cloudstack.storage.feign.model.ExportPolicy;
+import org.apache.cloudstack.storage.feign.model.ExportRule;
+import org.apache.cloudstack.storage.feign.model.FileInfo;
+import org.apache.cloudstack.storage.feign.model.Job;
+import org.apache.cloudstack.storage.feign.model.Nas;
import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.feign.model.Svm;
+import org.apache.cloudstack.storage.feign.model.Volume;
+import org.apache.cloudstack.storage.feign.model.response.JobResponse;
+import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
import org.apache.cloudstack.storage.service.model.AccessGroup;
import org.apache.cloudstack.storage.service.model.CloudStackVolume;
import org.apache.cloudstack.storage.utils.Constants;
+import org.apache.cloudstack.storage.utils.Utility;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
+import javax.inject.Inject;
+import java.util.ArrayList;
+import java.util.List;
import java.util.Map;
public class UnifiedNASStrategy extends NASStrategy {
private static final Logger s_logger = LogManager.getLogger(UnifiedNASStrategy.class);
- // Add missing Feign client setup for NAS operations
private final FeignClientFactory feignClientFactory;
private final NASFeignClient nasFeignClient;
+ private final VolumeFeignClient volumeFeignClient;
+ private final JobFeignClient jobFeignClient;
+ @Inject
+ private StoragePoolDetailsDao storagePoolDetailsDao;
public UnifiedNASStrategy(OntapStorage ontapStorage) {
super(ontapStorage);
String baseURL = Constants.HTTPS + ontapStorage.getManagementLIF();
// Initialize FeignClientFactory and create NAS client
this.feignClientFactory = new FeignClientFactory();
+ // NAS client uses export policy API endpoint
this.nasFeignClient = feignClientFactory.createClient(NASFeignClient.class, baseURL);
+ // Volume and Job clients target the same ONTAP management LIF base URL.
+ this.volumeFeignClient = feignClientFactory.createClient(VolumeFeignClient.class,baseURL );
+ this.jobFeignClient = feignClientFactory.createClient(JobFeignClient.class, baseURL );
}
public void setOntapStorage(OntapStorage ontapStorage) {
@@ -51,8 +76,24 @@ public void setOntapStorage(OntapStorage ontapStorage) {
@Override
public CloudStackVolume createCloudStackVolume(CloudStackVolume cloudstackVolume) {
- //TODO: Implement NAS volume creation using nasFeignClient
- return null;
+ s_logger.info("createCloudStackVolume: Create cloudstack volume " + cloudstackVolume);
+ // Skip ontap file creation for now
+// try {
+// boolean created = createFile(cloudstackVolume.getVolume().getUuid(),cloudstackVolume.getCloudstackVolName(), cloudstackVolume.getFile());
+// if(created){
+// s_logger.debug("Successfully created file in ONTAP under volume with path {} or name {} ", cloudstackVolume.getVolume().getUuid(), cloudstackVolume.getCloudstackVolName());
+// FileInfo responseFile = cloudstackVolume.getFile();
+// responseFile.setPath(cloudstackVolume.getCloudstackVolName());
+// }else {
+// s_logger.error("File not created for volume {}", cloudstackVolume.getVolume().getUuid());
+// throw new CloudRuntimeException("File not created");
+// }
+//
+// }catch (Exception e) {
+// s_logger.error("Exception occurred while creating file or dir: {}. Exception: {}", cloudstackVolume.getCloudstackVolName(), e.getMessage());
+// throw new CloudRuntimeException("Failed to create file: " + e.getMessage());
+// }
+ return cloudstackVolume;
}
@Override
@@ -74,8 +115,27 @@ CloudStackVolume getCloudStackVolume(CloudStackVolume cloudstackVolume) {
@Override
public AccessGroup createAccessGroup(AccessGroup accessGroup) {
- //TODO
- return null;
+
+ Map details = accessGroup.getPrimaryDataStoreInfo().getDetails();
+ String svmName = details.get(Constants.SVM_NAME);
+ String volumeUUID = details.get(Constants.VOLUME_UUID);
+ String volumeName = details.get(Constants.VOLUME_NAME);
+
+ // Create the export policy
+ ExportPolicy policyRequest = createExportPolicyRequest(accessGroup,svmName,volumeName);
+ try {
+ createExportPolicy(svmName, policyRequest);
+ s_logger.info("ExportPolicy created: {}, now attaching this policy to storage pool volume", policyRequest.getName());
+
+ // attach export policy to volume of storage pool
+ assignExportPolicyToVolume(volumeUUID,policyRequest.getName());
+ s_logger.info("Successfully assigned exportPolicy {} to volume {}", policyRequest.getName(), volumeName);
+ accessGroup.setPolicy(policyRequest);
+ return accessGroup;
+ }catch(Exception e){
+ s_logger.error("Exception occurred while creating access group: " + e);
+ throw new CloudRuntimeException("Failed to create access group: " + e);
+ }
}
@Override
@@ -104,4 +164,239 @@ void enableLogicalAccess(Map values) {
void disableLogicalAccess(Map values) {
//TODO
}
+
+
+ /**
+  * Creates an export policy on the given SVM, then reads it back to verify it
+  * actually exists — the create call's success response alone is not trusted.
+  *
+  * @throws CloudRuntimeException when creation or verification fails
+  */
+ private void createExportPolicy(String svmName, ExportPolicy policy) {
+ s_logger.info("Creating export policy: {} for SVM: {}", policy, svmName);
+
+ try {
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ nasFeignClient.createExportPolicy(authHeader, policy);
+ // Verify the policy is really present (guard against a 2xx with no effect).
+ Map queryParams = Map.of(Constants.NAME, policy.getName());
+ OntapResponse policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader, queryParams);
+ if (policiesResponse == null || policiesResponse.getRecords() == null || policiesResponse.getRecords().isEmpty()) {
+ throw new CloudRuntimeException("Export policy " + policy.getName() + " was not created on ONTAP. " +
+ "Received successful response but policy does not exist.");
+ }
+ s_logger.info("Export policy created and verified successfully: " + policy.getName());
+ } catch (CloudRuntimeException e) {
+ // Already a meaningful error — rethrow without re-wrapping.
+ throw e;
+ } catch (FeignException e) {
+ s_logger.error("Failed to create export policy: {}", policy, e);
+ throw new CloudRuntimeException("Failed to create export policy: " + e.getMessage(), e);
+ } catch (Exception e) {
+ s_logger.error("Exception while creating export policy: {}", policy, e);
+ throw new CloudRuntimeException("Failed to create export policy: " + e.getMessage(), e);
+ }
+ }
+
+ /**
+  * Deletes the named export policy from ONTAP after resolving its id.
+  *
+  * @throws CloudRuntimeException when the policy cannot be found or deleted
+  */
+ private void deleteExportPolicy(String svmName, String policyName) {
+ try {
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ Map queryParams = Map.of(Constants.NAME, policyName);
+ OntapResponse policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader, queryParams);
+
+ // Guard against a null or empty record list, not just a null response —
+ // getRecords().get(0) below would otherwise throw on an empty result.
+ if (policiesResponse == null || policiesResponse.getRecords() == null || policiesResponse.getRecords().isEmpty()) {
+ s_logger.warn("Export policy not found for deletion: {}", policyName);
+ throw new CloudRuntimeException("Export policy not found : " + policyName);
+ }
+ String policyId = String.valueOf(policiesResponse.getRecords().get(0).getId());
+ nasFeignClient.deleteExportPolicyById(authHeader, policyId);
+ s_logger.info("Export policy deleted successfully: {}", policyName);
+ } catch (Exception e) {
+ s_logger.error("Failed to delete export policy: {}", policyName, e);
+ throw new CloudRuntimeException("Failed to delete export policy: " + policyName, e);
+ }
+ }
+
+
+ // NOTE(review): unimplemented stub — always returns an empty string. The
+ // signature suggests it should add a client rule to an export policy;
+ // confirm intent before any caller relies on it.
+ private String addExportRule(String policyName, String clientMatch, String[] protocols, String[] roRule, String[] rwRule) {
+ return "";
+ }
+
+ /**
+  * Assigns an existing export policy to an ONTAP volume by updating the
+  * volume's NAS configuration, then polls the resulting asynchronous ONTAP
+  * job until it succeeds.
+  *
+  * @param volumeUuid UUID of the ONTAP volume to update
+  * @param policyName name of the export policy to attach (must already exist)
+  * @throws CloudRuntimeException when the policy is missing, the update fails,
+  *         or the job does not complete within the retry budget
+  */
+ private void assignExportPolicyToVolume(String volumeUuid, String policyName) {
+ s_logger.info("Assigning export policy: {} to volume: {}", policyName, volumeUuid);
+
+ try {
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ Map queryParams = Map.of(Constants.NAME, policyName);
+ OntapResponse policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader, queryParams);
+ if (policiesResponse == null || policiesResponse.getRecords() == null || policiesResponse.getRecords().isEmpty()) {
+ s_logger.error("Export policy not found for assigning rule: {}", policyName);
+ throw new CloudRuntimeException("Export policy not found: " + policyName);
+ }
+
+ // Only the nas.export_policy.name field needs to be set on the update body.
+ Volume volumeUpdate = new Volume();
+ Nas nas = new Nas();
+ ExportPolicy policy = new ExportPolicy();
+ policy.setName(policyName);
+ nas.setExportPolicy(policy);
+ volumeUpdate.setNas(nas);
+
+ try {
+ // The volume update is asynchronous on ONTAP: it returns a job to poll.
+ JobResponse jobResponse = volumeFeignClient.updateVolumeRebalancing(authHeader, volumeUuid, volumeUpdate);
+ if (jobResponse == null || jobResponse.getJob() == null) {
+ throw new CloudRuntimeException("Failed to attach policy " + policyName + " to volume " + volumeUuid);
+ }
+ String jobUUID = jobResponse.getJob().getUuid();
+
+ // Poll the job until it succeeds, fails, or the retry budget runs out.
+ int jobRetryCount = 0;
+ Job updateVolumeJob = null;
+ while(updateVolumeJob == null || !updateVolumeJob.getState().equals(Constants.JOB_SUCCESS)) {
+ if(jobRetryCount >= Constants.JOB_MAX_RETRIES) {
+ s_logger.error("Job to update volume " + volumeUuid + " did not complete within expected time.");
+ throw new CloudRuntimeException("Job to update volume " + volumeUuid + " did not complete within expected time.");
+ }
+
+ try {
+ updateVolumeJob = jobFeignClient.getJobByUUID(authHeader, jobUUID);
+ if (updateVolumeJob == null) {
+ s_logger.warn("Job with UUID " + jobUUID + " not found. Retrying...");
+ } else if (updateVolumeJob.getState().equals(Constants.JOB_FAILURE)) {
+ throw new CloudRuntimeException("Job to update volume " + volumeUuid + " failed with error: " + updateVolumeJob.getMessage());
+ }
+ } catch (FeignException.FeignClientException e) {
+ throw new CloudRuntimeException("Failed to fetch job status: " + e.getMessage(), e);
+ }
+
+ jobRetryCount++;
+ Thread.sleep(Constants.CREATE_VOLUME_CHECK_SLEEP_TIME); // pause between polls
+ }
+ } catch (Exception e) {
+ s_logger.error("Exception while updating volume: ", e);
+ throw new CloudRuntimeException("Failed to update volume: " + e.getMessage(), e);
+ }
+
+ s_logger.info("Export policy successfully assigned to volume: {}", volumeUuid);
+ } catch (FeignException e) {
+ s_logger.error("Failed to assign export policy to volume: {}", volumeUuid, e);
+ throw new CloudRuntimeException("Failed to assign export policy: " + e.getMessage(), e);
+ } catch (Exception e) {
+ s_logger.error("Exception while assigning export policy to volume: {}", volumeUuid, e);
+ throw new CloudRuntimeException("Failed to assign export policy: " + e.getMessage(), e);
+ }
+ }
+
+ /**
+  * Creates a file entry described by {@code fileInfo} at {@code filePath}
+  * inside the ONTAP volume identified by {@code volumeUuid}.
+  *
+  * @return true on success; false on any failure (errors are logged, not thrown)
+  */
+ private boolean createFile(String volumeUuid, String filePath, FileInfo fileInfo) {
+ s_logger.info("Creating file: {} in volume: {}", filePath, volumeUuid);
+
+ try {
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ nasFeignClient.createFile(authHeader, volumeUuid, filePath, fileInfo);
+ s_logger.info("File created successfully: {} in volume: {}", filePath, volumeUuid);
+ return true;
+ } catch (FeignException e) {
+ s_logger.error("Failed to create file: {} in volume: {}", filePath, volumeUuid, e);
+ return false;
+ } catch (Exception e) {
+ s_logger.error("Exception while creating file: {} in volume: {}", filePath, volumeUuid, e);
+ return false;
+ }
+ }
+
+ /**
+  * Deletes the file at {@code filePath} from the ONTAP volume identified by
+  * {@code volumeUuid}.
+  *
+  * @return true on success; false on any failure (errors are logged, not thrown)
+  */
+ private boolean deleteFile(String volumeUuid, String filePath) {
+ s_logger.info("Deleting file: {} from volume: {}", filePath, volumeUuid);
+
+ try {
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ nasFeignClient.deleteFile(authHeader, volumeUuid, filePath);
+ s_logger.info("File deleted successfully: {} from volume: {}", filePath, volumeUuid);
+ return true;
+ } catch (FeignException e) {
+ s_logger.error("Failed to delete file: {} from volume: {}", filePath, volumeUuid, e);
+ return false;
+ } catch (Exception e) {
+ s_logger.error("Exception while deleting file: {} from volume: {}", filePath, volumeUuid, e);
+ return false;
+ }
+ }
+
+ /**
+  * Fetches metadata for {@code filePath} inside the given ONTAP volume.
+  *
+  * @return the ONTAP response, or null when ONTAP reports 404 (file absent)
+  * @throws CloudRuntimeException on any other failure
+  */
+ private OntapResponse getFileInfo(String volumeUuid, String filePath) {
+ s_logger.debug("Getting file info for: {} in volume: {}", filePath, volumeUuid);
+
+ try {
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ OntapResponse response = nasFeignClient.getFileResponse(authHeader, volumeUuid, filePath);
+ s_logger.debug("Retrieved file info for: {} in volume: {}", filePath, volumeUuid);
+ return response;
+ } catch (FeignException e){
+ // 404 is an expected outcome (file does not exist) — signalled with null.
+ if (e.status() == 404) {
+ s_logger.debug("File not found: {} in volume: {}", filePath, volumeUuid);
+ return null;
+ }
+ s_logger.error("Failed to get file info: {} in volume: {}", filePath, volumeUuid, e);
+ throw new CloudRuntimeException("Failed to get file info: " + e.getMessage());
+ } catch (Exception e){
+ s_logger.error("Exception while getting file info: {} in volume: {}", filePath, volumeUuid, e);
+ throw new CloudRuntimeException("Failed to get file info: " + e.getMessage());
+ }
+ }
+
+ /**
+  * Updates the file at {@code filePath} inside the given ONTAP volume with
+  * the attributes in {@code fileInfo}.
+  *
+  * @return true on success; false on any failure (errors are logged, not thrown)
+  */
+ private boolean updateFile(String volumeUuid, String filePath, FileInfo fileInfo) {
+ s_logger.info("Updating file: {} in volume: {}", filePath, volumeUuid);
+
+ try {
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ nasFeignClient.updateFile( authHeader, volumeUuid, filePath, fileInfo);
+ s_logger.info("File updated successfully: {} in volume: {}", filePath, volumeUuid);
+ return true;
+ } catch (FeignException e) {
+ s_logger.error("Failed to update file: {} in volume: {}", filePath, volumeUuid, e);
+ return false;
+ } catch (Exception e){
+ s_logger.error("Exception while updating file: {} in volume: {}", filePath, volumeUuid, e);
+ return false;
+ }
+ }
+
+ // Derives a deterministic, per-pool policy name from the SVM and volume names
+ // (pattern: EXPORT-<svm>-<volume>, with separators from Constants).
+ private String generateExportPolicyName(String svmName, String volumeName){
+ return Constants.EXPORT + Constants.HYPHEN + svmName + Constants.HYPHEN + volumeName;
+ }
+
+ /**
+  * Builds an ExportPolicy request for the given SVM/volume that grants NFS
+  * access (read-only, read-write and superuser via sys authentication) to
+  * every host in the access group. Each host contributes one single-address
+  * client match (/32), preferring its storage IP over its private IP.
+  */
+ private ExportPolicy createExportPolicyRequest(AccessGroup accessGroup,String svmName , String volumeName){
+
+ String exportPolicyName = generateExportPolicyName(svmName,volumeName);
+ ExportPolicy exportPolicy = new ExportPolicy();
+
+ List rules = new ArrayList<>();
+ ExportRule exportRule = new ExportRule();
+
+ List exportClients = new ArrayList<>();
+ List hosts = accessGroup.getHostsToConnect();
+ for (HostVO host : hosts) {
+ String hostStorageIp = host.getStorageIpAddress();
+ // Prefer the dedicated storage network IP; fall back to the private IP.
+ String ip = (hostStorageIp != null && !hostStorageIp.isEmpty())
+ ? hostStorageIp
+ : host.getPrivateIpAddress();
+ String ipToUse = ip + "/32";
+ ExportRule.ExportClient exportClient = new ExportRule.ExportClient();
+ exportClient.setMatch(ipToUse);
+ exportClients.add(exportClient);
+ }
+ exportRule.setClients(exportClients);
+ exportRule.setProtocols(List.of(ExportRule.ProtocolsEnum.any));
+ exportRule.setRoRule(List.of("sys")); // Use sys (Unix UID/GID) authentication for NFS
+ exportRule.setRwRule(List.of("sys")); // Use sys (Unix UID/GID) authentication for NFS
+ exportRule.setSuperuser(List.of("sys")); // Allow root/superuser access with sys auth
+ rules.add(exportRule);
+
+ Svm svm = new Svm();
+ svm.setName(svmName);
+ exportPolicy.setSvm(svm);
+ exportPolicy.setRules(rules);
+ exportPolicy.setName(exportPolicyName);
+
+ return exportPolicy;
+ }
}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java
index c4dfce7ce51c..a3467cc30d2d 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java
@@ -19,14 +19,24 @@
package org.apache.cloudstack.storage.service.model;
+import com.cloud.host.HostVO;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
import org.apache.cloudstack.storage.feign.model.ExportPolicy;
import org.apache.cloudstack.storage.feign.model.Igroup;
+import java.util.List;
+
public class AccessGroup {
private Igroup igroup;
private ExportPolicy exportPolicy;
+ private List<HostVO> hostsToConnect;
+ private PrimaryDataStoreInfo primaryDataStoreInfo;
+ private Scope scope;
+
+
public Igroup getIgroup() {
return igroup;
}
@@ -42,4 +52,23 @@ public ExportPolicy getPolicy() {
public void setPolicy(ExportPolicy policy) {
this.exportPolicy = policy;
}
+ public List<HostVO> getHostsToConnect() {
+ return hostsToConnect;
+ }
+ public void setHostsToConnect(List<HostVO> hostsToConnect) {
+ this.hostsToConnect = hostsToConnect;
+ }
+ public PrimaryDataStoreInfo getPrimaryDataStoreInfo() {
+ return primaryDataStoreInfo;
+ }
+ public void setPrimaryDataStoreInfo(PrimaryDataStoreInfo primaryDataStoreInfo) {
+ this.primaryDataStoreInfo = primaryDataStoreInfo;
+ }
+ public Scope getScope() {
+ return scope;
+ }
+ public void setScope(Scope scope) {
+ this.scope = scope;
+ }
+
}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java
index a7f5d8659d03..694c4a2c126f 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java
@@ -21,11 +21,15 @@
import org.apache.cloudstack.storage.feign.model.FileInfo;
import org.apache.cloudstack.storage.feign.model.Lun;
+import org.apache.cloudstack.storage.feign.model.Volume;
public class CloudStackVolume {
private FileInfo file;
private Lun lun;
+ private Volume volume;
+ // will be replaced after testing
+ private String cloudstackVolName;
public FileInfo getFile() {
return file;
@@ -42,4 +46,16 @@ public Lun getLun() {
public void setLun(Lun lun) {
this.lun = lun;
}
+ public Volume getVolume() {
+ return volume;
+ }
+ public void setVolume(Volume volume) {
+ this.volume = volume;
+ }
+ public String getCloudstackVolName() {
+ return cloudstackVolName;
+ }
+ public void setCloudstackVolName(String cloudstackVolName) {
+ this.cloudstackVolName = cloudstackVolName;
+ }
}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/ProtocolType.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/ProtocolType.java
index 00dca62480dc..47b55ec29bb7 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/ProtocolType.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/ProtocolType.java
@@ -20,6 +20,6 @@
package org.apache.cloudstack.storage.service.model;
public enum ProtocolType {
- NFS3,
+ NFS,
ISCSI
}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java
index b58e8484cd48..a885179da05b 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java
@@ -31,6 +31,9 @@ public class Constants {
public static final String MANAGEMENT_LIF = "managementLIF";
public static final String IS_DISAGGREGATED = "isDisaggregated";
public static final String RUNNING = "running";
+ public static final String VOLUME_UUID = "volumeUUID";
+ public static final String VOLUME_NAME = "volumeNAME";
+ public static final String EXPORT = "export";
public static final int ONTAP_PORT = 443;
@@ -53,6 +56,7 @@ public class Constants {
public static final String EQUALS = "=";
public static final String SEMICOLON = ";";
public static final String COMMA = ",";
+ public static final String HYPHEN = "-";
public static final String VOLUME_PATH_PREFIX = "/vol/";
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java
index af48724f984c..b1f50ee513be 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java
@@ -21,11 +21,16 @@
import com.cloud.utils.StringUtils;
import com.cloud.utils.exception.CloudRuntimeException;
-import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.storage.feign.model.FileInfo;
import org.apache.cloudstack.storage.feign.model.Lun;
import org.apache.cloudstack.storage.feign.model.LunSpace;
+import org.apache.cloudstack.storage.feign.model.OntapStorage;
import org.apache.cloudstack.storage.feign.model.Svm;
+import org.apache.cloudstack.storage.feign.model.Volume;
+import org.apache.cloudstack.storage.provider.StorageProviderFactory;
+import org.apache.cloudstack.storage.service.StorageStrategy;
import org.apache.cloudstack.storage.service.model.CloudStackVolume;
import org.apache.cloudstack.storage.service.model.ProtocolType;
import org.apache.logging.log4j.LogManager;
@@ -53,41 +58,98 @@ public static String generateAuthHeader (String username, String password) {
return BASIC + StringUtils.SPACE + new String(encodedBytes);
}
- public static CloudStackVolume createCloudStackVolumeRequestByProtocol(StoragePoolVO storagePool, Map<String, String> details, DataObject dataObject) {
- CloudStackVolume cloudStackVolumeRequest = null;
+ /**
+ * Creates CloudStackVolume request object for ONTAP REST API calls.
+ *
+ * IMPORTANT: For Managed NFS (Option 2 Implementation):
+ * - The NFS case below is DEPRECATED and NOT USED
+ * - OntapPrimaryDatastoreDriver.createManagedNfsVolume() handles NFS volumes
+ * - It returns UUID only without creating files (KVM creates qcow2 automatically)
+ * - This method is ONLY used for iSCSI/block storage volumes
+ *
+ * @param storagePool Storage pool information
+ * @param details Storage pool details with ONTAP connection info
+ * @param volumeObject Volume information
+ * @return CloudStackVolume request for ONTAP REST API
+ */
+ public static CloudStackVolume createCloudStackVolumeRequestByProtocol(StoragePoolVO storagePool, Map<String, String> details, VolumeInfo volumeObject) {
+ CloudStackVolume cloudStackVolumeRequest = null;
- String protocol = details.get(Constants.PROTOCOL);
- if (ProtocolType.ISCSI.name().equalsIgnoreCase(protocol)) {
- cloudStackVolumeRequest = new CloudStackVolume();
- Lun lunRequest = new Lun();
- Svm svm = new Svm();
- svm.setName(details.get(Constants.SVM_NAME));
- lunRequest.setSvm(svm);
+ String protocol = details.get(Constants.PROTOCOL);
+ ProtocolType protocolType = ProtocolType.valueOf(protocol);
+ switch (protocolType) {
+ case NFS:
+ // DEPRECATED: This NFS case is NOT USED in Option 2 Implementation
+ // For Managed NFS, OntapPrimaryDatastoreDriver.createManagedNfsVolume()
+ // returns UUID only and lets KVM create qcow2 files automatically.
+ // This legacy code remains for reference but is bypassed in current implementation.
+ s_logger.warn("createCloudStackVolumeRequestByProtocol: NFS case should not be called. " +
+ "Use OntapPrimaryDatastoreDriver.createManagedNfsVolume() instead.");
+ cloudStackVolumeRequest = new CloudStackVolume();
+ FileInfo file = new FileInfo();
+ file.setSize(volumeObject.getSize());
+ file.setUnixPermissions(755);
+ file.setType(FileInfo.TypeEnum.FILE);
- LunSpace lunSpace = new LunSpace();
- lunSpace.setSize(dataObject.getSize());
- lunRequest.setSpace(lunSpace);
- //Lun name is full path like in unified "/vol/VolumeName/LunName"
- String lunFullName = Constants.VOLUME_PATH_PREFIX + storagePool.getName() + Constants.PATH_SEPARATOR + dataObject.getName();
- lunRequest.setName(lunFullName);
+ Volume poolVolume = new Volume();
+ poolVolume.setName(details.get(Constants.VOLUME_NAME));
+ poolVolume.setUuid(details.get(Constants.VOLUME_UUID));
+ cloudStackVolumeRequest.setVolume(poolVolume);
+ cloudStackVolumeRequest.setFile(file);
+ cloudStackVolumeRequest.setCloudstackVolName(volumeObject.getName());
+ break;
+ case ISCSI:
+ cloudStackVolumeRequest = new CloudStackVolume();
+ Lun lunRequest = new Lun();
+ Svm svm = new Svm();
+ svm.setName(details.get(Constants.SVM_NAME));
+ lunRequest.setSvm(svm);
- String hypervisorType = storagePool.getHypervisor().name();
- String osType = null;
- switch (hypervisorType) {
- case Constants.KVM:
- osType = Lun.OsTypeEnum.LINUX.getValue();
- break;
- default:
- String errMsg = "createCloudStackVolume : Unsupported hypervisor type " + hypervisorType + " for ONTAP storage";
- s_logger.error(errMsg);
- throw new CloudRuntimeException(errMsg);
- }
- lunRequest.setOsType(Lun.OsTypeEnum.valueOf(osType));
+ LunSpace lunSpace = new LunSpace();
+ lunSpace.setSize(volumeObject.getSize());
+ lunRequest.setSpace(lunSpace);
+ //Lun name is full path like in unified "/vol/VolumeName/LunName"
+ String lunFullName = Constants.VOLUME_PATH_PREFIX + storagePool.getName() + Constants.PATH_SEPARATOR + volumeObject.getName();
+ lunRequest.setName(lunFullName);
+
+ String hypervisorType = storagePool.getHypervisor().name();
+ String osType = null;
+ switch (hypervisorType) {
+ case Constants.KVM:
+ osType = Lun.OsTypeEnum.LINUX.getValue();
+ break;
+ default:
+ String errMsg = "createCloudStackVolume : Unsupported hypervisor type " + hypervisorType + " for ONTAP storage";
+ s_logger.error(errMsg);
+ throw new CloudRuntimeException(errMsg);
+ }
+ lunRequest.setOsType(Lun.OsTypeEnum.valueOf(osType));
+ cloudStackVolumeRequest.setLun(lunRequest);
+ break;
+ default:
+ throw new CloudRuntimeException("createCloudStackVolumeRequestByProtocol: Unsupported protocol " + protocol);
+
+ }
+ return cloudStackVolumeRequest;
+ }
- cloudStackVolumeRequest.setLun(lunRequest);
- return cloudStackVolumeRequest;
- } else {
- throw new CloudRuntimeException("createCloudStackVolumeRequestByProtocol: Unsupported protocol " + protocol);
- }
+ public static StorageStrategy getStrategyByStoragePoolDetails(Map<String, String> details) {
+ if (details == null || details.isEmpty()) {
+ s_logger.error("getStrategyByStoragePoolDetails: Storage pool details are null or empty");
+ throw new CloudRuntimeException("getStrategyByStoragePoolDetails: Storage pool details are null or empty");
+ }
+ String protocol = details.get(Constants.PROTOCOL);
+ OntapStorage ontapStorage = new OntapStorage(details.get(Constants.USERNAME), details.get(Constants.PASSWORD),
+ details.get(Constants.MANAGEMENT_LIF), details.get(Constants.SVM_NAME), ProtocolType.valueOf(protocol),
+ Boolean.parseBoolean(details.get(Constants.IS_DISAGGREGATED)));
+ StorageStrategy storageStrategy = StorageProviderFactory.getStrategy(ontapStorage);
+ boolean isValid = storageStrategy.connect();
+ if (isValid) {
+ s_logger.info("Connection to Ontap SVM [{}] successful", details.get(Constants.SVM_NAME));
+ return storageStrategy;
+ } else {
+ s_logger.error("getStrategyByStoragePoolDetails: Connection to Ontap SVM [" + details.get(Constants.SVM_NAME) + "] failed");
+ throw new CloudRuntimeException("getStrategyByStoragePoolDetails: Connection to Ontap SVM [" + details.get(Constants.SVM_NAME) + "] failed");
+ }
}
}