diff --git a/plugins/storage/volume/ontap/pom.xml b/plugins/storage/volume/ontap/pom.xml
index 3628f6f3f592..afd3af113146 100644
--- a/plugins/storage/volume/ontap/pom.xml
+++ b/plugins/storage/volume/ontap/pom.xml
@@ -31,11 +31,11 @@
2021.0.7
11.0
20230227
- 2.13.4
4.5.14
1.6.2
3.8.1
2.22.2
+ 2.13.4
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java
index 7ddce42991f5..5e79aa2298da 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java
@@ -18,8 +18,11 @@
*/
package org.apache.cloudstack.storage.driver;
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.to.DataObjectType;
import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.DataTO;
+import com.cloud.exception.InvalidParameterValueException;
import com.cloud.host.Host;
import com.cloud.storage.Storage;
import com.cloud.storage.StoragePool;
@@ -40,11 +43,12 @@
import org.apache.cloudstack.storage.command.CommandResult;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
-import org.apache.cloudstack.storage.feign.model.OntapStorage;
-import org.apache.cloudstack.storage.provider.StorageProviderFactory;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.service.StorageStrategy;
+import org.apache.cloudstack.storage.service.model.CloudStackVolume;
import org.apache.cloudstack.storage.service.model.ProtocolType;
import org.apache.cloudstack.storage.utils.Constants;
+import org.apache.cloudstack.storage.utils.Utility;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -58,13 +62,15 @@ public class OntapPrimaryDatastoreDriver implements PrimaryDataStoreDriver {
@Inject private StoragePoolDetailsDao storagePoolDetailsDao;
@Inject private PrimaryDataStoreDao storagePoolDao;
+
@Override
public Map getCapabilities() {
s_logger.trace("OntapPrimaryDatastoreDriver: getCapabilities: Called");
Map mapCapabilities = new HashMap<>();
-
- mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.TRUE.toString());
- mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString());
+ // RAW managed initial implementation: snapshot features not yet supported
+ // TODO Set these to true once we start supporting the snapshot feature
+ mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.FALSE.toString());
+ mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.FALSE.toString());
return mapCapabilities;
}
@@ -75,18 +81,93 @@ public DataTO getTO(DataObject data) {
}
@Override
- public DataStoreTO getStoreTO(DataStore store) {
- return null;
- }
+ public DataStoreTO getStoreTO(DataStore store) { return null; }
@Override
public void createAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback callback) {
+ CreateCmdResult createCmdResult = null;
+ String path = null;
+ String errMsg = null;
+ if (dataStore == null) {
+ throw new InvalidParameterValueException("createAsync: dataStore should not be null");
+ }
+ if (dataObject == null) {
+ throw new InvalidParameterValueException("createAsync: dataObject should not be null");
+ }
+ if (callback == null) {
+ throw new InvalidParameterValueException("createAsync: callback should not be null");
+ }
+ try {
+ s_logger.info("createAsync: Started for data store [{}] and data object [{}] of type [{}]",
+ dataStore, dataObject, dataObject.getType());
+ if (dataObject.getType() == DataObjectType.VOLUME) {
+ VolumeInfo volumeInfo = (VolumeInfo) dataObject;
+ path = createCloudStackVolumeForTypeVolume(dataStore, volumeInfo);
+ createCmdResult = new CreateCmdResult(path, new Answer(null, true, null));
+ } else {
+ errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to createAsync";
+ s_logger.error(errMsg);
+ throw new CloudRuntimeException(errMsg);
+ }
+ } catch (Exception e) {
+ errMsg = e.getMessage();
+ s_logger.error("createAsync: Failed for dataObject [{}]: {}", dataObject, errMsg);
+ createCmdResult = new CreateCmdResult(null, new Answer(null, false, errMsg));
+ createCmdResult.setResult(e.toString());
+ } finally {
+ if (createCmdResult != null && createCmdResult.isSuccess()) {
+ s_logger.info("createAsync: Volume created successfully. Path: {}", path);
+ }
+ callback.complete(createCmdResult);
+ }
+ }
+ private String createCloudStackVolumeForTypeVolume(DataStore dataStore, VolumeInfo volumeObject) {
+ StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId());
+ if(storagePool == null) {
+ s_logger.error("createCloudStackVolume : Storage Pool not found for id: " + dataStore.getId());
+ throw new CloudRuntimeException("createCloudStackVolume : Storage Pool not found for id: " + dataStore.getId());
+ }
+ Map details = storagePoolDetailsDao.listDetailsKeyPairs(dataStore.getId());
+ StorageStrategy storageStrategy = Utility.getStrategyByStoragePoolDetails(details);
+ s_logger.info("createCloudStackVolumeForTypeVolume: Connection to Ontap SVM [{}] successful, preparing CloudStackVolumeRequest", details.get(Constants.SVM_NAME));
+ CloudStackVolume cloudStackVolumeRequest = Utility.createCloudStackVolumeRequestByProtocol(storagePool, details, volumeObject);
+ CloudStackVolume cloudStackVolume = storageStrategy.createCloudStackVolume(cloudStackVolumeRequest);
+ if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL)) && cloudStackVolume.getLun() != null && cloudStackVolume.getLun().getName() != null) {
+ return cloudStackVolume.getLun().getName();
+ } else if (ProtocolType.NFS3.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) {
+ return volumeObject.getUuid(); // return the volume UUID; the agent uses it as the path when mounting
+ } else {
+ String errMsg = "createCloudStackVolumeForTypeVolume: Volume creation failed. Lun or Lun Path is null for dataObject: " + volumeObject;
+ s_logger.error(errMsg);
+ throw new CloudRuntimeException(errMsg);
+ }
}
@Override
public void deleteAsync(DataStore store, DataObject data, AsyncCompletionCallback callback) {
-
+ CommandResult commandResult = new CommandResult();
+ try {
+ if (store == null || data == null) {
+ throw new CloudRuntimeException("deleteAsync: store or data is null");
+ }
+ if (data.getType() == DataObjectType.VOLUME) {
+ StoragePoolVO storagePool = storagePoolDao.findById(store.getId());
+ if(storagePool == null) {
+ s_logger.error("deleteAsync : Storage Pool not found for id: " + store.getId());
+ throw new CloudRuntimeException("deleteAsync : Storage Pool not found for id: " + store.getId());
+ }
+ Map details = storagePoolDetailsDao.listDetailsKeyPairs(store.getId());
+ if (ProtocolType.NFS3.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) {
+ // ManagedNFS qcow2 backing file deletion handled by KVM host/libvirt; nothing to do via ONTAP REST.
+ s_logger.info("deleteAsync: ManagedNFS volume {} no-op ONTAP deletion", data.getId());
+ }
+ }
+ } catch (Exception e) {
+ commandResult.setResult(e.getMessage());
+ } finally {
+ callback.complete(commandResult);
+ }
}
@Override
@@ -121,7 +202,6 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore
@Override
public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) {
-
}
@Override
@@ -161,7 +241,7 @@ public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, Qual
@Override
public boolean canProvideStorageStats() {
- return true;
+ return false;
}
@Override
@@ -171,7 +251,7 @@ public Pair getStorageStats(StoragePool storagePool) {
@Override
public boolean canProvideVolumeStats() {
- return true;
+ return false; // Not yet implemented for RAW managed NFS
}
@Override
@@ -213,24 +293,4 @@ public boolean isStorageSupportHA(Storage.StoragePoolType type) {
public void detachVolumeFromAllStorageNodes(Volume volume) {
}
-
- private StorageStrategy getStrategyByStoragePoolDetails(Map details) {
- if (details == null || details.isEmpty()) {
- s_logger.error("getStrategyByStoragePoolDetails: Storage pool details are null or empty");
- throw new CloudRuntimeException("getStrategyByStoragePoolDetails: Storage pool details are null or empty");
- }
- String protocol = details.get(Constants.PROTOCOL);
- OntapStorage ontapStorage = new OntapStorage(details.get(Constants.USERNAME), details.get(Constants.PASSWORD),
- details.get(Constants.MANAGEMENT_LIF), details.get(Constants.SVM_NAME), Long.parseLong(details.get(Constants.SIZE)), ProtocolType.valueOf(protocol),
- Boolean.parseBoolean(details.get(Constants.IS_DISAGGREGATED)));
- StorageStrategy storageStrategy = StorageProviderFactory.getStrategy(ontapStorage);
- boolean isValid = storageStrategy.connect();
- if (isValid) {
- s_logger.info("Connection to Ontap SVM [{}] successful", details.get(Constants.SVM_NAME));
- return storageStrategy;
- } else {
- s_logger.error("getStrategyByStoragePoolDetails: Connection to Ontap SVM [" + details.get(Constants.SVM_NAME) + "] failed");
- throw new CloudRuntimeException("getStrategyByStoragePoolDetails: Connection to Ontap SVM [" + details.get(Constants.SVM_NAME) + "] failed");
- }
- }
}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java
index 79d28a6075be..d722a857c007 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java
@@ -137,6 +137,7 @@ public Decoder createDecoder() {
@Override
public Object decode(Response response, Type type) throws IOException, DecodeException {
if (response.body() == null) {
+ logger.debug("Response body is null, returning null");
return null;
}
String json = null;
@@ -145,8 +146,11 @@ public Object decode(Response response, Type type) throws IOException, DecodeExc
logger.debug("Decoding JSON response: {}", json);
return objectMapper.readValue(json, objectMapper.getTypeFactory().constructType(type));
} catch (IOException e) {
- logger.error("Error decoding JSON response. Status: {}, Raw body: {}", response.status(), json, e);
+ logger.error("IOException during decoding. Status: {}, Raw body: {}", response.status(), json, e);
throw new DecodeException(response.status(), "Error decoding JSON response", response.request(), e);
+ } catch (Exception e) {
+ logger.error("Unexpected error during decoding. Status: {}, Type: {}, Raw body: {}", response.status(), type, json, e);
+ throw new DecodeException(response.status(), "Unexpected error during decoding", response.request(), e);
}
}
};
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java
index b7aac9954cfe..f48f83dc28de 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java
@@ -19,6 +19,7 @@
package org.apache.cloudstack.storage.feign.client;
+import feign.QueryMap;
import org.apache.cloudstack.storage.feign.model.ExportPolicy;
import org.apache.cloudstack.storage.feign.model.FileInfo;
import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
@@ -26,58 +27,60 @@
import feign.Param;
import feign.RequestLine;
-//TODO: Proper URLs should be added in the RequestLine annotations below
+import java.util.Map;
+
public interface NASFeignClient {
// File Operations
- @RequestLine("GET /{volumeUuid}/files/{path}")
+ @RequestLine("GET /api/storage/volumes/{volumeUuid}/files/{path}")
@Headers({"Authorization: {authHeader}"})
OntapResponse getFileResponse(@Param("authHeader") String authHeader,
- @Param("volumeUuid") String volumeUUID,
- @Param("path") String filePath);
+ @Param("volumeUuid") String volumeUUID,
+ @Param("path") String filePath);
- @RequestLine("DELETE /{volumeUuid}/files/{path}")
+ @RequestLine("DELETE /api/storage/volumes/{volumeUuid}/files/{path}")
@Headers({"Authorization: {authHeader}"})
void deleteFile(@Param("authHeader") String authHeader,
- @Param("volumeUuid") String volumeUUID,
- @Param("path") String filePath);
+ @Param("volumeUuid") String volumeUUID,
+ @Param("path") String filePath);
- @RequestLine("PATCH /{volumeUuid}/files/{path}")
+ @RequestLine("PATCH /api/storage/volumes/{volumeUuid}/files/{path}")
@Headers({"Authorization: {authHeader}"})
void updateFile(@Param("authHeader") String authHeader,
- @Param("volumeUuid") String volumeUUID,
- @Param("path") String filePath, FileInfo fileInfo);
+ @Param("volumeUuid") String volumeUUID,
+ @Param("path") String filePath,
+ FileInfo fileInfo);
- @RequestLine("POST /{volumeUuid}/files/{path}")
+ @RequestLine("POST /api/storage/volumes/{volumeUuid}/files/{path}")
@Headers({"Authorization: {authHeader}"})
void createFile(@Param("authHeader") String authHeader,
- @Param("volumeUuid") String volumeUUID,
- @Param("path") String filePath, FileInfo file);
+ @Param("volumeUuid") String volumeUUID,
+ @Param("path") String filePath,
+ FileInfo file);
// Export Policy Operations
- @RequestLine("POST /")
- @Headers({"Authorization: {authHeader}", "return_records: {returnRecords}"})
- ExportPolicy createExportPolicy(@Param("authHeader") String authHeader,
- @Param("returnRecords") boolean returnRecords,
+ @RequestLine("POST /api/protocols/nfs/export-policies")
+ @Headers({"Authorization: {authHeader}"})
+ void createExportPolicy(@Param("authHeader") String authHeader,
ExportPolicy exportPolicy);
- @RequestLine("GET /")
+ @RequestLine("GET /api/protocols/nfs/export-policies")
@Headers({"Authorization: {authHeader}"})
- OntapResponse getExportPolicyResponse(@Param("authHeader") String authHeader);
+ OntapResponse getExportPolicyResponse(@Param("authHeader") String authHeader, @QueryMap Map queryMap);
- @RequestLine("GET /{id}")
+ @RequestLine("GET /api/protocols/nfs/export-policies/{id}")
@Headers({"Authorization: {authHeader}"})
- OntapResponse getExportPolicyById(@Param("authHeader") String authHeader,
- @Param("id") String id);
+ ExportPolicy getExportPolicyById(@Param("authHeader") String authHeader,
+ @Param("id") String id);
- @RequestLine("DELETE /{id}")
+ @RequestLine("DELETE /api/protocols/nfs/export-policies/{id}")
@Headers({"Authorization: {authHeader}"})
void deleteExportPolicyById(@Param("authHeader") String authHeader,
- @Param("id") String id);
+ @Param("id") String id);
- @RequestLine("PATCH /{id}")
+ @RequestLine("PATCH /api/protocols/nfs/export-policies/{id}")
@Headers({"Authorization: {authHeader}"})
OntapResponse updateExportPolicy(@Param("authHeader") String authHeader,
- @Param("id") String id,
- ExportPolicy request);
+ @Param("id") String id,
+ ExportPolicy request);
}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NetworkFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NetworkFeignClient.java
index 4acbbecf6573..4dc82a68238e 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NetworkFeignClient.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NetworkFeignClient.java
@@ -1,3 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
package org.apache.cloudstack.storage.feign.client;
import feign.Headers;
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java
index 717409664662..6384566487d4 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java
@@ -46,7 +46,11 @@ public interface VolumeFeignClient {
@Headers({"Authorization: {authHeader}"})
Volume getVolumeByUUID(@Param("authHeader") String authHeader, @Param("uuid") String uuid);
+ @RequestLine("GET /api/storage/volumes")
+ @Headers({"Authorization: {authHeader}"})
+ OntapResponse getVolume(@Param("authHeader") String authHeader, @QueryMap Map queryMap);
+
@RequestLine("PATCH /api/storage/volumes/{uuid}")
- @Headers({"Accept: {acceptHeader}", "Authorization: {authHeader}"})
- JobResponse updateVolumeRebalancing(@Param("acceptHeader") String acceptHeader, @Param("uuid") String uuid, Volume volumeRequest);
+ @Headers({ "Authorization: {authHeader}"})
+ JobResponse updateVolumeRebalancing(@Param("authHeader") String authHeader, @Param("uuid") String uuid, Volume volumeRequest);
}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportRule.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportRule.java
index 8f3c9597dca7..788fc8b5544d 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportRule.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportRule.java
@@ -76,6 +76,16 @@ public static ProtocolsEnum fromValue(String text) {
@JsonProperty("protocols")
private List protocols = null;
+ @JsonProperty("ro_rule")
+ private List roRule = null;
+
+ @JsonProperty("rw_rule")
+ private List rwRule = null;
+
+ @JsonProperty("superuser")
+ private List superuser = null;
+
+
public ExportRule anonymousUser(String anonymousUser) {
this.anonymousUser = anonymousUser;
return this;
@@ -140,6 +150,30 @@ public void setMatch (String match) {
}
}
+ public List getRwRule() {
+ return rwRule;
+ }
+
+ public void setRwRule(List rwRule) {
+ this.rwRule = rwRule;
+ }
+
+ public List getRoRule() {
+ return roRule;
+ }
+
+ public void setRoRule(List roRule) {
+ this.roRule = roRule;
+ }
+
+ public List getSuperuser() {
+ return superuser;
+ }
+
+ public void setSuperuser(List superuser) {
+ this.superuser = superuser;
+ }
+
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java
old mode 100644
new mode 100755
index d12d6838ccb5..2cdd7de0b7c5
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java
@@ -23,7 +23,6 @@
import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.dao.ClusterDao;
-import com.cloud.exception.InvalidParameterValueException;
import com.cloud.host.HostVO;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.resource.ResourceManager;
@@ -40,12 +39,13 @@
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
-import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDetailsDao;
import org.apache.cloudstack.storage.datastore.lifecycle.BasePrimaryDataStoreLifeCycleImpl;
+import org.apache.cloudstack.storage.feign.model.ExportPolicy;
import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.feign.model.Volume;
import org.apache.cloudstack.storage.provider.StorageProviderFactory;
import org.apache.cloudstack.storage.service.StorageStrategy;
import org.apache.cloudstack.storage.service.model.AccessGroup;
@@ -221,6 +221,21 @@ public DataStore initialize(Map dsInfos) {
}
s_logger.info("Using Data LIF for storage access: " + dataLIF);
details.put(Constants.DATA_LIF, dataLIF);
+ s_logger.info("Creating ONTAP volume '" + storagePoolName + "' with size: " + volumeSize + " bytes (" +
+ (volumeSize / (1024 * 1024 * 1024)) + " GB)");
+ try {
+ Volume volume = storageStrategy.createStorageVolume(storagePoolName, volumeSize);
+ if (volume == null) {
+ s_logger.error("createStorageVolume returned null for volume: " + storagePoolName);
+ throw new CloudRuntimeException("Failed to create ONTAP volume: " + storagePoolName);
+ }
+ s_logger.info("Volume object retrieved successfully. UUID: " + volume.getUuid() + ", Name: " + volume.getName());
+ details.putIfAbsent(Constants.VOLUME_UUID, volume.getUuid());
+ details.putIfAbsent(Constants.VOLUME_NAME, volume.getName());
+ } catch (Exception e) {
+ s_logger.error("Exception occurred while creating ONTAP volume: " + storagePoolName, e);
+ throw new CloudRuntimeException("Failed to create ONTAP volume: " + storagePoolName + ". Error: " + e.getMessage(), e);
+ }
} else {
throw new CloudRuntimeException("ONTAP details validation failed, cannot create primary storage");
}
@@ -268,45 +283,30 @@ public DataStore initialize(Map dsInfos) {
@Override
public boolean attachCluster(DataStore dataStore, ClusterScope scope) {
logger.debug("In attachCluster for ONTAP primary storage");
- if (dataStore == null) {
- throw new InvalidParameterValueException("attachCluster: dataStore should not be null");
- }
- if (scope == null) {
- throw new InvalidParameterValueException("attachCluster: scope should not be null");
- }
-
- StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId());
- if (storagePool == null) {
- s_logger.error("attachCluster : Storage Pool not found for id: " + dataStore.getId());
- throw new CloudRuntimeException("attachCluster : Storage Pool not found for id: " + dataStore.getId());
- }
- s_logger.info("Found the Storage Pool: " + storagePool.getName() + " for id: " + dataStore.getId());
PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)dataStore;
List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primaryStore);
- // TODO- need to check if no host to connect then throw exception or just continue
- logger.debug("attachCluster: Eligible Up and Enabled hosts: {} in cluster {}", hostsToConnect, primaryStore.getClusterId());
- Map details = storagePoolDetailsDao.listDetailsKeyPairs(storagePool.getId());
- primaryStore.setDetails(details);
+ logger.debug(" datastore object received is {} ",primaryStore );
+
+ logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primaryStore.getClusterId()));
+
+ Map details = storagePoolDetailsDao.listDetailsKeyPairs(primaryStore.getId());
StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details);
- //TODO - check if no host to connect then also need to create access group without initiators
- try {
- AccessGroup accessGroupRequest = new AccessGroup();
- accessGroupRequest.setHostsToConnect(hostsToConnect);
- accessGroupRequest.setScope(scope);
- accessGroupRequest.setPrimaryDataStoreInfo(primaryStore);
- logger.info("attachCluster: Creating access group on storage system for cluster");
- strategy.createAccessGroup(accessGroupRequest);
- } catch (Exception e) {
- throw new CloudRuntimeException("attachCluster: Failed to create access group on storage system for cluster. Exception: " + e.getMessage());
- }
- logger.debug("attachCluster: Attaching the pool to each of the host in the cluster");
+ ExportPolicy exportPolicy = new ExportPolicy();
+ AccessGroup accessGroupRequest = new AccessGroup();
+ accessGroupRequest.setHostsToConnect(hostsToConnect);
+ accessGroupRequest.setScope(scope);
+ primaryStore.setDetails(details);// set details explicitly, since they are not populated by CloudStack
+ accessGroupRequest.setPrimaryDataStoreInfo(primaryStore);
+ accessGroupRequest.setPolicy(exportPolicy);
+ strategy.createAccessGroup(accessGroupRequest);
+
+ logger.debug("attachCluster: Attaching the pool to each of the host in the cluster: {}", primaryStore.getClusterId());
for (HostVO host : hostsToConnect) {
try {
- _storageMgr.connectHostToSharedPool(host, primaryStore.getId());
- logger.debug("attachCluster: Successfully established a connection between host {} and storage pool {}", host.getId(), primaryStore.getId());
+ _storageMgr.connectHostToSharedPool(host, dataStore.getId());
} catch (Exception e) {
- logger.warn("attachCluster: Unable to establish a connection between " + host + " and " + primaryStore, e);
+ logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e);
return false;
}
}
@@ -322,44 +322,28 @@ public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo exis
@Override
public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.HypervisorType hypervisorType) {
logger.debug("In attachZone for ONTAP primary storage");
- if (dataStore == null) {
- throw new InvalidParameterValueException("attachZone: dataStore should not be null");
- }
- if (scope == null) {
- throw new InvalidParameterValueException("attachZone: scope should not be null");
- }
-// List hostsIdentifier = new ArrayList<>();
- StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId());
- if (storagePool == null) {
- s_logger.error("attachZone : Storage Pool not found for id: " + dataStore.getId());
- throw new CloudRuntimeException("attachZone : Storage Pool not found for id: " + dataStore.getId());
- }
+
PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)dataStore;
List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), Hypervisor.HypervisorType.KVM);
- // TODO- need to check if no host to connect then throw exception or just continue
- logger.debug("attachZone: Eligible Up and Enabled hosts: {}", hostsToConnect);
+ logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", hostsToConnect));
- Map details = storagePoolDetailsDao.listDetailsKeyPairs(dataStore.getId());
- primaryStore.setDetails(details);
+ Map details = storagePoolDetailsDao.listDetailsKeyPairs(primaryStore.getId());
StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details);
- try {
- AccessGroup accessGroupRequest = new AccessGroup();
- accessGroupRequest.setHostsToConnect(hostsToConnect);
- accessGroupRequest.setScope(scope);
- accessGroupRequest.setPrimaryDataStoreInfo(primaryStore);
- logger.info("attachCluster: Creating access group on storage system for zone");
- strategy.createAccessGroup(accessGroupRequest);
- } catch (Exception e) {
- throw new CloudRuntimeException("attachZone: Failed to create access group on storage system for zone with Exception: " + e.getMessage());
- }
- logger.debug("attachCluster: Attaching the pool to each of the host in the zone");
+ ExportPolicy exportPolicy = new ExportPolicy();
+ AccessGroup accessGroupRequest = new AccessGroup();
+ accessGroupRequest.setHostsToConnect(hostsToConnect);
+ accessGroupRequest.setScope(scope);
+ primaryStore.setDetails(details); // set details explicitly, since they are not populated by CloudStack
+ accessGroupRequest.setPrimaryDataStoreInfo(primaryStore);
+ accessGroupRequest.setPolicy(exportPolicy);
+ strategy.createAccessGroup(accessGroupRequest);
+
for (HostVO host : hostsToConnect) {
try {
_storageMgr.connectHostToSharedPool(host, dataStore.getId());
- logger.debug("attachCluster: Successfully established a connection between host {} and storage pool {}", host.getId(), primaryStore.getId());
} catch (Exception e) {
logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e);
- return false;
+ return false;
}
}
_dataStoreHelper.attachZone(dataStore);
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java
index 8812eef3a95f..30c698995a8f 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java
@@ -16,147 +16,176 @@
* specific language governing permissions and limitations
* under the License.
*/
+
package org.apache.cloudstack.storage.listener;
-import com.cloud.agent.AgentManager;
-import com.cloud.agent.api.Answer;
+import javax.inject.Inject;
+
import com.cloud.agent.api.ModifyStoragePoolCommand;
-import com.cloud.host.HostVO;
-import com.cloud.host.dao.HostDao;
-import com.cloud.storage.DataStoreRole;
-import com.cloud.storage.StoragePool;
+import com.cloud.agent.api.ModifyStoragePoolAnswer;
+import com.cloud.agent.api.StoragePoolInfo;
+import com.cloud.alert.AlertManager;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.dao.StoragePoolHostDao;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
+import com.cloud.agent.AgentManager;
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.DeleteStoragePoolCommand;
+import com.cloud.host.Host;
+import com.cloud.storage.StoragePool;
import com.cloud.utils.exception.CloudRuntimeException;
-import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
-import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
-import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-
-import javax.inject.Inject;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
+import com.cloud.host.dao.HostDao;
-/**
- * OntapHostListener handles host lifecycle events for ONTAP storage pools.
- *
- * For ONTAP iSCSI storage pools:
- * - The igroup (initiator group) is created/updated in OntapPrimaryDatastoreLifecycle.attachCluster()
- * - The actual iSCSI target discovery and login is handled by StorageManager via ModifyStoragePoolCommand
- * - This listener simply manages the storage pool-host relationship in the database
- *
- * For ONTAP NFS storage pools:
- * - The export policy is configured during storage pool creation
- * - The actual NFS mount is handled by StorageManager via ModifyStoragePoolCommand
- * - This listener simply manages the storage pool-host relationship in the database
- */
public class OntapHostListener implements HypervisorHostListener {
protected Logger logger = LogManager.getLogger(getClass());
- @Inject private HostDao hostDao;
- @Inject private AgentManager agentMgr;
- @Inject private PrimaryDataStoreDao storagePoolDao;
- @Inject private DataStoreManager dataStoreMgr;
- @Inject private StoragePoolDetailsDao storagePoolDetailsDao;
+ @Inject
+ private AgentManager _agentMgr;
+ @Inject
+ private AlertManager _alertMgr;
+ @Inject
+ private PrimaryDataStoreDao _storagePoolDao;
+ @Inject
+ private HostDao _hostDao;
@Inject private StoragePoolHostDao storagePoolHostDao;
- @Override
- public boolean hostAdded(long hostId) {
- HostVO host = hostDao.findById(hostId);
+ @Override
+ public boolean hostConnect(long hostId, long poolId) {
+ logger.info("Connecting host {} to storage pool {}", hostId, poolId);
+ Host host = _hostDao.findById(hostId);
if (host == null) {
- logger.error("hostAdded: Host {} not found", hostId);
+ logger.error("host was not found with id : {}", hostId);
return false;
}
- if (host.getClusterId() == null) {
- logger.error("hostAdded: Host {} has no associated cluster", hostId);
+ // TODO add host type check also since we support only KVM for now, host.getHypervisorType().equals(HypervisorType.KVM)
+ StoragePool pool = _storagePoolDao.findById(poolId);
+ if (pool == null) {
+ logger.error("Failed to connect host - storage pool not found with id: {}", poolId);
return false;
}
+ logger.info("Connecting host {} to ONTAP storage pool {}", host.getName(), pool.getName());
+ try {
+ // Create the ModifyStoragePoolCommand to send to the agent
+ // Note: Always send command even if database entry exists, because agent may have restarted
+ // and lost in-memory pool registration. The command handler is idempotent.
+ ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool);
+
+ Answer answer = _agentMgr.easySend(hostId, cmd);
+
+ if (answer == null) {
+ throw new CloudRuntimeException(String.format("Unable to get an answer to the modify storage pool command (%s)", pool));
+ }
+
+ if (!answer.getResult()) {
+ String msg = String.format("Unable to attach storage pool %s to host %d", pool, hostId);
+
+ _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, pool.getDataCenterId(), pool.getPodId(), msg, msg);
+
+ throw new CloudRuntimeException(String.format(
+ "Unable to establish a connection from agent to storage pool %s due to %s", pool, answer.getDetails()));
+ }
- logger.info("hostAdded: Host {} added to cluster {}", hostId, host.getClusterId());
+ // Get the mount path from the answer
+ ModifyStoragePoolAnswer mspAnswer = (ModifyStoragePoolAnswer) answer;
+ StoragePoolInfo poolInfo = mspAnswer.getPoolInfo();
+ if (poolInfo == null) {
+ throw new CloudRuntimeException("ModifyStoragePoolAnswer returned null poolInfo");
+ }
+
+ String localPath = poolInfo.getLocalPath();
+ logger.info("Storage pool {} successfully mounted at: {}", pool.getName(), localPath);
+
+ // Update or create the storage_pool_host_ref entry with the correct local_path
+ StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(poolId, hostId);
+
+ if (storagePoolHost == null) {
+ storagePoolHost = new StoragePoolHostVO(poolId, hostId, localPath);
+ storagePoolHostDao.persist(storagePoolHost);
+ logger.info("Created storage_pool_host_ref entry for pool {} and host {}", pool.getName(), host.getName());
+ } else {
+ storagePoolHost.setLocalPath(localPath);
+ storagePoolHostDao.update(storagePoolHost.getId(), storagePoolHost);
+ logger.info("Updated storage_pool_host_ref entry with local_path: {}", localPath);
+ }
+
+ // Update pool capacity/usage information
+ StoragePoolVO poolVO = _storagePoolDao.findById(poolId);
+ if (poolVO != null && poolInfo.getCapacityBytes() > 0) {
+ poolVO.setCapacityBytes(poolInfo.getCapacityBytes());
+ poolVO.setUsedBytes(poolInfo.getCapacityBytes() - poolInfo.getAvailableBytes());
+ _storagePoolDao.update(poolVO.getId(), poolVO);
+ logger.info("Updated storage pool capacity: {} GB, used: {} GB", poolInfo.getCapacityBytes() / (1024 * 1024 * 1024), (poolInfo.getCapacityBytes() - poolInfo.getAvailableBytes()) / (1024 * 1024 * 1024));
+ }
+
+ } catch (Exception e) {
+ logger.error("Exception while connecting host {} to storage pool {}", host.getName(), pool.getName(), e);
+ // CRITICAL: Don't throw exception - it crashes the agent and causes restart loops
+ // Return false to indicate failure without crashing
+ return false;
+ }
return true;
}
@Override
- public boolean hostConnect(long hostId, long storagePoolId) {
- logger.debug("hostConnect: Connecting host {} to storage pool {}", hostId, storagePoolId);
+ public boolean hostDisconnected(Host host, StoragePool pool) {
+ logger.info("Disconnecting host {} from pool {}", host.getId(), pool.getName());
- HostVO host = hostDao.findById(hostId);
- if (host == null) {
- logger.error("hostConnect: Host {} not found", hostId);
+ Host hostToRemove = _hostDao.findById(host.getId());
+ if (hostToRemove == null) {
+ logger.error("Failed to disconnect host by HostListener as host was not found with id : {}", host.getId());
return false;
}
-
- // Create or update the storage pool host mapping in the database
- // The actual storage pool connection (iSCSI login or NFS mount) is handled
- // by the StorageManager via ModifyStoragePoolCommand sent to the host agent
- StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(storagePoolId, hostId);
- StoragePool storagePool = (StoragePool)dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);
- if (storagePoolHost == null) {
- storagePoolHost = new StoragePoolHostVO(storagePoolId, hostId, "");
- storagePoolHostDao.persist(storagePoolHost);
- ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, storagePool);
- Answer answer = agentMgr.easySend(host.getId(), cmd);
- if (answer == null || !answer.getResult()) {
- storagePoolDao.expunge(storagePool.getId());
- throw new CloudRuntimeException("attachCluster: Failed to attach storage pool to host: " + host.getId() +
- " due to " + (answer != null ? answer.getDetails() : "no answer from agent"));
+ // TODO add storage pool get validation
+ logger.info("Disconnecting host {} from ONTAP storage pool {}", host.getName(), pool.getName());
+
+ try {
+ DeleteStoragePoolCommand cmd = new DeleteStoragePoolCommand(pool);
+ long hostId = host.getId();
+ Answer answer = _agentMgr.easySend(hostId, cmd);
+
+ if (answer != null && answer.getResult()) {
+ logger.info("Successfully disconnected host {} from ONTAP storage pool {}", host.getName(), pool.getName());
+ return true;
+ } else {
+ String errMsg = (answer != null) ? answer.getDetails() : "Unknown error";
+ logger.warn("Failed to disconnect host {} from storage pool {}. Error: {}", host.getName(), pool.getName(), errMsg);
+ return false;
}
- logger.info("Connection established between storage pool {} and host {}", storagePool, host);
- } else {
- // TODO: Update any necessary details if needed, by fetching OntapVolume info from ONTAP
- logger.debug("hostConnect: Storage pool-host mapping already exists for pool {} and host {}",
- storagePool.getName(), host.getName());
+ } catch (Exception e) {
+ logger.error("Exception while disconnecting host {} from storage pool {}", host.getName(), pool.getName(), e);
+ return false;
}
-
- return true;
}
@Override
- public boolean hostDisconnected(long hostId, long storagePoolId) {
- logger.debug("hostDisconnected: Disconnecting host {} from storage pool {}",
- hostId, storagePoolId);
-
- StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(storagePoolId, hostId);
- if (storagePoolHost != null) {
- storagePoolHostDao.deleteStoragePoolHostDetails(hostId, storagePoolId);
- logger.info("hostDisconnected: Removed storage pool-host mapping for pool {} and host {}",
- storagePoolId, hostId);
- } else {
- logger.debug("hostDisconnected: No storage pool-host mapping found for pool {} and host {}",
- storagePoolId, hostId);
- }
-
- return true;
+ public boolean hostDisconnected(long hostId, long poolId) {
+ return false;
}
@Override
public boolean hostAboutToBeRemoved(long hostId) {
- HostVO host = hostDao.findById(hostId);
- if (host == null) {
- logger.error("hostAboutToBeRemoved: Host {} not found", hostId);
- return false;
- }
-
- logger.info("hostAboutToBeRemoved: Host {} about to be removed from cluster {}",
- hostId, host.getClusterId());
-
- // Note: When a host is removed, the igroup initiator should be removed in
- // the appropriate lifecycle method, not here
- return true;
+ return false;
}
@Override
public boolean hostRemoved(long hostId, long clusterId) {
- logger.info("hostRemoved: Host {} removed from cluster {}", hostId, clusterId);
- return true;
+ return false;
}
@Override
public boolean hostEnabled(long hostId) {
- logger.debug("hostEnabled: Host {} enabled", hostId);
- return true;
+ return false;
+ }
+
+ @Override
+ public boolean hostAdded(long hostId) {
+ return false;
}
-}
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java
old mode 100644
new mode 100755
index 8f75ff05660a..91bfd0a8584c
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java
@@ -42,7 +42,7 @@ public class OntapPrimaryDatastoreProvider implements PrimaryDataStoreProvider {
private static final Logger s_logger = LogManager.getLogger(OntapPrimaryDatastoreProvider.class);
private OntapPrimaryDatastoreDriver primaryDatastoreDriver;
private OntapPrimaryDatastoreLifecycle primaryDatastoreLifecycle;
- private OntapHostListener hostListener;
+ private HypervisorHostListener listener;
public OntapPrimaryDatastoreProvider() {
s_logger.info("OntapPrimaryDatastoreProvider initialized");
@@ -59,7 +59,7 @@ public DataStoreDriver getDataStoreDriver() {
@Override
public HypervisorHostListener getHostListener() {
- return hostListener;
+ return listener;
}
@Override
@@ -73,7 +73,7 @@ public boolean configure(Map params) {
s_logger.trace("OntapPrimaryDatastoreProvider: configure: Called");
primaryDatastoreDriver = ComponentContext.inject(OntapPrimaryDatastoreDriver.class);
primaryDatastoreLifecycle = ComponentContext.inject(OntapPrimaryDatastoreLifecycle.class);
- hostListener = ComponentContext.inject(OntapHostListener.class);
+ listener = ComponentContext.inject(OntapHostListener.class);
return true;
}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java
index 6bb6ad1fef73..5947212efd96 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java
@@ -19,6 +19,7 @@
package org.apache.cloudstack.storage.provider;
+import com.cloud.utils.component.ComponentContext;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.storage.feign.model.OntapStorage;
import org.apache.cloudstack.storage.service.StorageStrategy;
@@ -39,6 +40,7 @@ public static StorageStrategy getStrategy(OntapStorage ontapStorage) {
case NFS3:
if (!ontapStorage.getIsDisaggregated()) {
UnifiedNASStrategy unifiedNASStrategy = new UnifiedNASStrategy(ontapStorage);
+ ComponentContext.inject(unifiedNASStrategy);
unifiedNASStrategy.setOntapStorage(ontapStorage);
return unifiedNASStrategy;
}
@@ -46,6 +48,7 @@ public static StorageStrategy getStrategy(OntapStorage ontapStorage) {
case ISCSI:
if (!ontapStorage.getIsDisaggregated()) {
UnifiedSANStrategy unifiedSANStrategy = new UnifiedSANStrategy(ontapStorage);
+ ComponentContext.inject(unifiedSANStrategy);
unifiedSANStrategy.setOntapStorage(ontapStorage);
return unifiedSANStrategy;
}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java
index b142bac44a45..7a450cd9d164 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java
@@ -32,6 +32,7 @@
import org.apache.cloudstack.storage.feign.model.IpInterface;
import org.apache.cloudstack.storage.feign.model.IscsiService;
import org.apache.cloudstack.storage.feign.model.Job;
+import org.apache.cloudstack.storage.feign.model.Nas;
import org.apache.cloudstack.storage.feign.model.OntapStorage;
import org.apache.cloudstack.storage.feign.model.Svm;
import org.apache.cloudstack.storage.feign.model.Volume;
@@ -181,6 +182,8 @@ public Volume createStorageVolume(String volumeName, Long size) {
Volume volumeRequest = new Volume();
Svm svm = new Svm();
svm.setName(svmName);
+ Nas nas = new Nas();
+ nas.setPath(Constants.SLASH + volumeName);
volumeRequest.setName(volumeName);
volumeRequest.setSvm(svm);
@@ -189,10 +192,8 @@ public Volume createStorageVolume(String volumeName, Long size) {
aggr.setUuid(aggregates.get(0).getUuid());
volumeRequest.setAggregates(List.of(aggr));
volumeRequest.setSize(size);
- // Make the POST API call to create the volume
+ volumeRequest.setNas(nas);
try {
- // Create URI for POST CreateVolume API
- // Call the VolumeFeignClient to create the volume
JobResponse jobResponse = volumeFeignClient.createVolumeWithJob(authHeader, volumeRequest);
if (jobResponse == null || jobResponse.getJob() == null) {
throw new CloudRuntimeException("Failed to initiate volume creation for " + volumeName);
@@ -226,8 +227,37 @@ public Volume createStorageVolume(String volumeName, Long size) {
throw new CloudRuntimeException("Mismatch in created volume name. Expected: " + volumeName + ", Found: " + createdVolume.getName());
}
s_logger.info("Volume created successfully: " + volumeName);
- // Return the created Volume object
- return createdVolume;
+ try {
+ Map queryParams = Map.of(Constants.NAME, volumeName);
+ s_logger.debug("Fetching volume details for: " + volumeName);
+
+ OntapResponse ontapVolume = volumeFeignClient.getVolume(authHeader, queryParams);
+ s_logger.debug("Feign call completed. Processing response...");
+
+ if (ontapVolume == null) {
+ s_logger.error("OntapResponse is null for volume: " + volumeName);
+ throw new CloudRuntimeException("Failed to fetch volume " + volumeName + ": Response is null");
+ }
+ s_logger.debug("OntapResponse is not null. Checking records field...");
+
+ if (ontapVolume.getRecords() == null) {
+ s_logger.error("OntapResponse.records is null for volume: " + volumeName);
+ throw new CloudRuntimeException("Failed to fetch volume " + volumeName + ": Records list is null");
+ }
+ s_logger.debug("Records field is not null. Size: " + ontapVolume.getRecords().size());
+
+ if (ontapVolume.getRecords().isEmpty()) {
+ s_logger.error("OntapResponse.records is empty for volume: " + volumeName);
+ throw new CloudRuntimeException("Failed to fetch volume " + volumeName + ": No records found");
+ }
+
+ Volume volume = ontapVolume.getRecords().get(0);
+ s_logger.info("Volume retrieved successfully: " + volumeName + ", UUID: " + volume.getUuid());
+ return volume;
+ } catch (Exception e) {
+ s_logger.error("Exception while retrieving volume details for: " + volumeName, e);
+ throw new CloudRuntimeException("Failed to fetch volume: " + volumeName + ". Error: " + e.getMessage(), e);
+ }
}
/**
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java
index cb3079691c94..4d6948b1c01f 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java
@@ -19,30 +19,61 @@
package org.apache.cloudstack.storage.service;
+import com.cloud.agent.api.Answer;
+import com.cloud.host.HostVO;
+import com.cloud.storage.Storage;
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.utils.exception.CloudRuntimeException;
+import feign.FeignException;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
+import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
+import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
+import org.apache.cloudstack.storage.command.CreateObjectCommand;
import org.apache.cloudstack.storage.feign.FeignClientFactory;
+import org.apache.cloudstack.storage.feign.client.JobFeignClient;
import org.apache.cloudstack.storage.feign.client.NASFeignClient;
+import org.apache.cloudstack.storage.feign.client.VolumeFeignClient;
+import org.apache.cloudstack.storage.feign.model.ExportPolicy;
+import org.apache.cloudstack.storage.feign.model.ExportRule;
+import org.apache.cloudstack.storage.feign.model.FileInfo;
+import org.apache.cloudstack.storage.feign.model.Job;
+import org.apache.cloudstack.storage.feign.model.Nas;
import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.feign.model.Svm;
+import org.apache.cloudstack.storage.feign.model.Volume;
+import org.apache.cloudstack.storage.feign.model.response.JobResponse;
+import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
import org.apache.cloudstack.storage.service.model.AccessGroup;
import org.apache.cloudstack.storage.service.model.CloudStackVolume;
import org.apache.cloudstack.storage.utils.Constants;
+import org.apache.cloudstack.storage.utils.Utility;
+import org.apache.cloudstack.storage.volume.VolumeObject;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
+import javax.inject.Inject;
+import java.util.ArrayList;
+import java.util.List;
import java.util.Map;
public class UnifiedNASStrategy extends NASStrategy {
private static final Logger s_logger = LogManager.getLogger(UnifiedNASStrategy.class);
- // Add missing Feign client setup for NAS operations
private final FeignClientFactory feignClientFactory;
private final NASFeignClient nasFeignClient;
+ private final VolumeFeignClient volumeFeignClient;
+ private final JobFeignClient jobFeignClient;
+ @Inject private VolumeDao volumeDao;
+ @Inject private EndPointSelector epSelector;
public UnifiedNASStrategy(OntapStorage ontapStorage) {
super(ontapStorage);
String baseURL = Constants.HTTPS + ontapStorage.getManagementLIF();
- // Initialize FeignClientFactory and create NAS client
this.feignClientFactory = new FeignClientFactory();
this.nasFeignClient = feignClientFactory.createClient(NASFeignClient.class, baseURL);
+ this.volumeFeignClient = feignClientFactory.createClient(VolumeFeignClient.class,baseURL );
+ this.jobFeignClient = feignClientFactory.createClient(JobFeignClient.class, baseURL );
}
public void setOntapStorage(OntapStorage ontapStorage) {
@@ -51,8 +82,22 @@ public void setOntapStorage(OntapStorage ontapStorage) {
@Override
public CloudStackVolume createCloudStackVolume(CloudStackVolume cloudstackVolume) {
- //TODO: Implement NAS volume creation using nasFeignClient
- return null;
+ s_logger.info("createCloudStackVolume: Create cloudstack volume " + cloudstackVolume);
+ try {
+ // Step 1: set cloudstack volume metadata
+ String volumeUuid = updateCloudStackVolumeMetadata(cloudstackVolume.getDatastoreId(), cloudstackVolume.getVolumeInfo());
+ // Step 2: Send command to KVM host to create qcow2 file using qemu-img
+ Answer answer = createVolumeOnKVMHost(cloudstackVolume.getVolumeInfo());
+ if (answer == null || !answer.getResult()) {
+ String errMsg = answer != null ? answer.getDetails() : "Failed to create qcow2 on KVM host";
+ s_logger.error("createCloudStackVolume: " + errMsg);
+ throw new CloudRuntimeException(errMsg);
+ }
+ return cloudstackVolume;
+ } catch (Exception e) {
+ s_logger.error("createCloudStackVolume: error occurred " + e);
+ throw new CloudRuntimeException(e);
+ }
}
@Override
@@ -74,8 +119,26 @@ CloudStackVolume getCloudStackVolume(CloudStackVolume cloudstackVolume) {
@Override
public AccessGroup createAccessGroup(AccessGroup accessGroup) {
- //TODO
- return null;
+ s_logger.info("createAccessGroup: Creating access group: {}", accessGroup);
+ Map details = accessGroup.getPrimaryDataStoreInfo().getDetails();
+ String svmName = details.get(Constants.SVM_NAME);
+ String volumeUUID = details.get(Constants.VOLUME_UUID);
+ String volumeName = details.get(Constants.VOLUME_NAME);
+
+ // Create the export policy
+ ExportPolicy policyRequest = createExportPolicyRequest(accessGroup,svmName,volumeName);
+ try {
+ ExportPolicy createdPolicy = createExportPolicy(svmName, policyRequest);
+ s_logger.info("ExportPolicy created: {}, now attaching this policy to storage pool volume", createdPolicy.getName());
+ // attach export policy to volume of storage pool
+ assignExportPolicyToVolume(volumeUUID,createdPolicy.getName());
+ s_logger.info("Successfully assigned exportPolicy {} to volume {}", policyRequest.getName(), volumeName);
+ accessGroup.setPolicy(policyRequest);
+ return accessGroup;
+ }catch(Exception e){
+ s_logger.error("Exception occurred while creating access group: " + e);
+ throw new CloudRuntimeException("Failed to create access group: " + e);
+ }
}
@Override
@@ -104,4 +167,265 @@ void enableLogicalAccess(Map values) {
void disableLogicalAccess(Map values) {
//TODO
}
+
+
+ private ExportPolicy createExportPolicy(String svmName, ExportPolicy policy) {
+ s_logger.info("Creating export policy: {} for SVM: {}", policy, svmName);
+
+ try {
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ nasFeignClient.createExportPolicy(authHeader, policy);
+ OntapResponse policiesResponse = null;
+ try {
+ Map queryParams = Map.of(Constants.NAME, policy.getName());
+ policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader, queryParams);
+ if (policiesResponse == null || policiesResponse.getRecords().isEmpty()) {
+ throw new CloudRuntimeException("Export policy " + policy.getName() + " was not created on ONTAP. " +
+ "Received successful response but policy does not exist.");
+ }
+ s_logger.info("Export policy created and verified successfully: " + policy.getName());
+ } catch (FeignException e) {
+ s_logger.error("Failed to verify export policy creation: " + policy.getName(), e);
+ throw new CloudRuntimeException("Export policy creation verification failed: " + e.getMessage());
+ }
+ s_logger.info("Export policy created successfully with name {}", policy.getName());
+ return policiesResponse.getRecords().get(0);
+ } catch (FeignException e) {
+ s_logger.error("Failed to create export policy: {}", policy, e);
+ throw new CloudRuntimeException("Failed to create export policy: " + e.getMessage());
+ } catch (Exception e) {
+ s_logger.error("Exception while creating export policy: {}", policy, e);
+ throw new CloudRuntimeException("Failed to create export policy: " + e.getMessage());
+ }
+ }
+
+ private void deleteExportPolicy(String svmName, String policyName) {
+ try {
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ Map queryParams = Map.of(Constants.NAME, policyName);
+ OntapResponse policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader, queryParams);
+
+ if (policiesResponse == null || policiesResponse.getRecords() == null || policiesResponse.getRecords().isEmpty()) {
+ s_logger.warn("Export policy not found for deletion: {}", policyName);
+ throw new CloudRuntimeException("Export policy not found : " + policyName);
+ }
+ String policyId = String.valueOf(policiesResponse.getRecords().get(0).getId());
+ nasFeignClient.deleteExportPolicyById(authHeader, policyId);
+ s_logger.info("Export policy deleted successfully: {}", policyName);
+ } catch (Exception e) {
+ s_logger.error("Failed to delete export policy: {}", policyName, e);
+ throw new CloudRuntimeException("Failed to delete export policy: " + policyName);
+ }
+ }
+
+ private void assignExportPolicyToVolume(String volumeUuid, String policyName) {
+ s_logger.info("Assigning export policy: {} to volume: {}", policyName, volumeUuid);
+
+ try {
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ // Create Volume update object with NAS configuration
+ Volume volumeUpdate = new Volume();
+ Nas nas = new Nas();
+ ExportPolicy policy = new ExportPolicy();
+ policy.setName(policyName);
+ nas.setExportPolicy(policy);
+ volumeUpdate.setNas(nas);
+
+ try {
+ JobResponse jobResponse = volumeFeignClient.updateVolumeRebalancing(authHeader, volumeUuid, volumeUpdate);
+ if (jobResponse == null || jobResponse.getJob() == null) {
+ throw new CloudRuntimeException("Failed to attach policy " + policyName + " to volume " + volumeUuid);
+ }
+ String jobUUID = jobResponse.getJob().getUuid();
+ //Create URI for GET Job API
+ int jobRetryCount = 0;
+ Job createVolumeJob = null;
+ while(createVolumeJob == null || !createVolumeJob.getState().equals(Constants.JOB_SUCCESS)) {
+ if(jobRetryCount >= Constants.JOB_MAX_RETRIES) {
+ s_logger.error("Job to update volume " + volumeUuid + " did not complete within expected time.");
+ throw new CloudRuntimeException("Job to update volume " + volumeUuid + " did not complete within expected time.");
+ }
+ try {
+ createVolumeJob = jobFeignClient.getJobByUUID(authHeader, jobUUID);
+ if (createVolumeJob == null) {
+ s_logger.warn("Job with UUID " + jobUUID + " not found. Retrying...");
+ } else if (createVolumeJob.getState().equals(Constants.JOB_FAILURE)) {
+ throw new CloudRuntimeException("Job to update volume " + volumeUuid + " failed with error: " + createVolumeJob.getMessage());
+ }
+ } catch (FeignException.FeignClientException e) {
+ throw new CloudRuntimeException("Failed to fetch job status: " + e.getMessage());
+ }
+ jobRetryCount++;
+ Thread.sleep(Constants.CREATE_VOLUME_CHECK_SLEEP_TIME); // Sleep for 2 seconds before polling again
+ }
+ } catch (Exception e) {
+ s_logger.error("Exception while updating volume: ", e);
+ throw new CloudRuntimeException("Failed to update volume: " + e.getMessage());
+ }
+ s_logger.info("Export policy successfully assigned to volume: {}", volumeUuid);
+ } catch (FeignException e) {
+ s_logger.error("Failed to assign export policy to volume: {}", volumeUuid, e);
+ throw new CloudRuntimeException("Failed to assign export policy: " + e.getMessage());
+ } catch (Exception e) {
+ s_logger.error("Exception while assigning export policy to volume: {}", volumeUuid, e);
+ throw new CloudRuntimeException("Failed to assign export policy: " + e.getMessage());
+ }
+ }
+
+ private boolean createFile(String volumeUuid, String filePath, FileInfo fileInfo) {
+ s_logger.info("Creating file: {} in volume: {}", filePath, volumeUuid);
+ try {
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ nasFeignClient.createFile(authHeader, volumeUuid, filePath, fileInfo);
+ s_logger.info("File created successfully: {} in volume: {}", filePath, volumeUuid);
+ return true;
+ } catch (FeignException e) {
+ s_logger.error("Failed to create file: {} in volume: {}", filePath, volumeUuid, e);
+ return false;
+ } catch (Exception e) {
+ s_logger.error("Exception while creating file: {} in volume: {}", filePath, volumeUuid, e);
+ return false;
+ }
+ }
+
+ private boolean deleteFile(String volumeUuid, String filePath) {
+ s_logger.info("Deleting file: {} from volume: {}", filePath, volumeUuid);
+ try {
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ nasFeignClient.deleteFile(authHeader, volumeUuid, filePath);
+ s_logger.info("File deleted successfully: {} from volume: {}", filePath, volumeUuid);
+ return true;
+ } catch (FeignException e) {
+ s_logger.error("Failed to delete file: {} from volume: {}", filePath, volumeUuid, e);
+ return false;
+ } catch (Exception e) {
+ s_logger.error("Exception while deleting file: {} from volume: {}", filePath, volumeUuid, e);
+ return false;
+ }
+ }
+
+ private OntapResponse getFileInfo(String volumeUuid, String filePath) {
+ s_logger.debug("Getting file info for: {} in volume: {}", filePath, volumeUuid);
+ try {
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ OntapResponse response = nasFeignClient.getFileResponse(authHeader, volumeUuid, filePath);
+ s_logger.debug("Retrieved file info for: {} in volume: {}", filePath, volumeUuid);
+ return response;
+ } catch (FeignException e){
+ if (e.status() == 404) {
+ s_logger.debug("File not found: {} in volume: {}", filePath, volumeUuid);
+ return null;
+ }
+ s_logger.error("Failed to get file info: {} in volume: {}", filePath, volumeUuid, e);
+ throw new CloudRuntimeException("Failed to get file info: " + e.getMessage());
+ } catch (Exception e){
+ s_logger.error("Exception while getting file info: {} in volume: {}", filePath, volumeUuid, e);
+ throw new CloudRuntimeException("Failed to get file info: " + e.getMessage());
+ }
+ }
+
+ private boolean updateFile(String volumeUuid, String filePath, FileInfo fileInfo) {
+ s_logger.info("Updating file: {} in volume: {}", filePath, volumeUuid);
+ try {
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ nasFeignClient.updateFile( authHeader, volumeUuid, filePath, fileInfo);
+ s_logger.info("File updated successfully: {} in volume: {}", filePath, volumeUuid);
+ return true;
+ } catch (FeignException e) {
+ s_logger.error("Failed to update file: {} in volume: {}", filePath, volumeUuid, e);
+ return false;
+ } catch (Exception e){
+ s_logger.error("Exception while updating file: {} in volume: {}", filePath, volumeUuid, e);
+ return false;
+ }
+ }
+
+ private String generateExportPolicyName(String svmName, String volumeName){
+ return Constants.EXPORT + Constants.HYPHEN + svmName + Constants.HYPHEN + volumeName;
+ }
+
+ private ExportPolicy createExportPolicyRequest(AccessGroup accessGroup,String svmName , String volumeName){
+
+ String exportPolicyName = generateExportPolicyName(svmName,volumeName);
+ ExportPolicy exportPolicy = new ExportPolicy();
+
+ List rules = new ArrayList<>();
+ ExportRule exportRule = new ExportRule();
+
+ List exportClients = new ArrayList<>();
+ List hosts = accessGroup.getHostsToConnect();
+ for (HostVO host : hosts) {
+ String hostStorageIp = host.getStorageIpAddress();
+ String ip = (hostStorageIp != null && !hostStorageIp.isEmpty())
+ ? hostStorageIp
+ : host.getPrivateIpAddress();
+ String ipToUse = ip + "/32";
+ ExportRule.ExportClient exportClient = new ExportRule.ExportClient();
+ exportClient.setMatch(ipToUse);
+ exportClients.add(exportClient);
+ }
+ exportRule.setClients(exportClients);
+ exportRule.setProtocols(List.of(ExportRule.ProtocolsEnum.any));
+ exportRule.setRoRule(List.of("sys"));
+ exportRule.setRwRule(List.of("sys"));
+ exportRule.setSuperuser(List.of("sys"));
+ rules.add(exportRule);
+
+ Svm svm = new Svm();
+ svm.setName(svmName);
+ exportPolicy.setSvm(svm);
+ exportPolicy.setRules(rules);
+ exportPolicy.setName(exportPolicyName);
+
+ return exportPolicy;
+ }
+
+ private String updateCloudStackVolumeMetadata(String dataStoreId, DataObject volumeInfo) {
+ s_logger.info("updateCloudStackVolumeMetadata called with datastoreID: {} volumeInfo: {} ", dataStoreId, volumeInfo );
+ try {
+ VolumeObject volumeObject = (VolumeObject) volumeInfo;
+ long volumeId = volumeObject.getId();
+ s_logger.info("VolumeInfo ID from VolumeObject: {}", volumeId);
+ VolumeVO volume = volumeDao.findById(volumeId);
+ if (volume == null) {
+ throw new CloudRuntimeException("Volume not found with id: " + volumeId);
+ }
+ String volumeUuid = volumeInfo.getUuid();
+ volume.setPoolType(Storage.StoragePoolType.NetworkFilesystem);
+ volume.setPoolId(Long.parseLong(dataStoreId));
+ volume.setPath(volumeUuid); // Filename for qcow2 file
+ volumeDao.update(volume.getId(), volume);
+ return volumeUuid;
+ }catch (Exception e){
+ s_logger.error("Exception while updating volume metadata for datastore: {} volume: {}", dataStoreId, volumeInfo.getUuid(), e);
+ throw new CloudRuntimeException("Exception while updating volumeInfo: " + e.getMessage());
+ }
+ }
+
+ private Answer createVolumeOnKVMHost(DataObject volumeInfo) {
+ s_logger.info("createVolumeOnKVMHost called with volumeInfo: {} ", volumeInfo);
+
+ try {
+ s_logger.info("createVolumeOnKVMHost: Sending CreateObjectCommand to KVM agent for volume: {}", volumeInfo.getUuid());
+ CreateObjectCommand cmd = new CreateObjectCommand(volumeInfo.getTO());
+ EndPoint ep = epSelector.select(volumeInfo);
+ if (ep == null) {
+ String errMsg = "No remote endpoint to send CreateObjectCommand, check if host is up";
+ s_logger.error(errMsg);
+ return new Answer(cmd, false, errMsg);
+ }
+ s_logger.info("createVolumeOnKVMHost: Sending command to endpoint: {}", ep.getHostAddr());
+ Answer answer = ep.sendMessage(cmd);
+ if (answer != null && answer.getResult()) {
+ s_logger.info("createVolumeOnKVMHost: Successfully created qcow2 file on KVM host");
+ } else {
+ s_logger.error("createVolumeOnKVMHost: Failed to create qcow2 file: {}",
+ answer != null ? answer.getDetails() : "null answer");
+ }
+ return answer;
+ } catch (Exception e) {
+ s_logger.error("createVolumeOnKVMHost: Exception sending CreateObjectCommand", e);
+ return new Answer(null, false, e.toString());
+ }
+ }
}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java
old mode 100644
new mode 100755
index ef6fe7353291..9ff80e7cf8a9
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java
@@ -36,6 +36,7 @@ public class AccessGroup {
private PrimaryDataStoreInfo primaryDataStoreInfo;
private Scope scope;
+
public Igroup getIgroup() {
return igroup;
}
@@ -51,7 +52,6 @@ public ExportPolicy getPolicy() {
public void setPolicy(ExportPolicy policy) {
this.exportPolicy = policy;
}
-
public List getHostsToConnect() {
return hostsToConnect;
}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java
index a7f5d8659d03..6c51e4630800 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java
@@ -19,6 +19,7 @@
package org.apache.cloudstack.storage.service.model;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.storage.feign.model.FileInfo;
import org.apache.cloudstack.storage.feign.model.Lun;
@@ -26,7 +27,8 @@ public class CloudStackVolume {
private FileInfo file;
private Lun lun;
-
+ private String datastoreId;
+ private DataObject volumeInfo; // This is needed as we need DataObject to be passed to agent to create volume
public FileInfo getFile() {
return file;
}
@@ -42,4 +44,16 @@ public Lun getLun() {
public void setLun(Lun lun) {
this.lun = lun;
}
+ public String getDatastoreId() {
+ return datastoreId;
+ }
+ public void setDatastoreId(String datastoreId) {
+ this.datastoreId = datastoreId;
+ }
+ public DataObject getVolumeInfo() {
+ return volumeInfo;
+ }
+ public void setVolumeInfo(DataObject volumeInfo) {
+ this.volumeInfo = volumeInfo;
+ }
}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java
index 0744777c12e5..a45fb4a5b21d 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java
@@ -35,6 +35,7 @@ public class Constants {
public static final String VOLUME_UUID = "volumeUUID";
public static final String IS_DISAGGREGATED = "isDisaggregated";
public static final String RUNNING = "running";
+ public static final String EXPORT = "export";
public static final int ONTAP_PORT = 443;
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java
index 323adfd0320c..a92d99b394fa 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java
@@ -22,10 +22,15 @@
import com.cloud.storage.ScopeType;
import com.cloud.utils.StringUtils;
import com.cloud.utils.exception.CloudRuntimeException;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.feign.model.Lun;
+import org.apache.cloudstack.storage.feign.model.LunSpace;
import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.feign.model.Svm;
import org.apache.cloudstack.storage.provider.StorageProviderFactory;
import org.apache.cloudstack.storage.service.StorageStrategy;
+import org.apache.cloudstack.storage.service.model.CloudStackVolume;
import org.apache.cloudstack.storage.service.model.ProtocolType;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -52,6 +57,52 @@ public static String generateAuthHeader (String username, String password) {
return BASIC + StringUtils.SPACE + new String(encodedBytes);
}
+    // Builds a protocol-specific CloudStackVolume creation request from the pool's
+    // details map. NFS3 requests carry only pool id + DataObject (the qcow2 file is
+    // created by the KVM agent); iSCSI requests carry a fully-populated Lun model.
+    // Throws CloudRuntimeException for unsupported protocols or hypervisors.
+    public static CloudStackVolume createCloudStackVolumeRequestByProtocol(StoragePoolVO storagePool, Map details, DataObject volumeObject) {
+        CloudStackVolume cloudStackVolumeRequest = null;
+        String protocol = details.get(Constants.PROTOCOL);
+        ProtocolType protocolType = ProtocolType.valueOf(protocol);
+        switch (protocolType) {
+            case NFS3:
+                cloudStackVolumeRequest = new CloudStackVolume();
+                cloudStackVolumeRequest.setDatastoreId(String.valueOf(storagePool.getId()));
+                cloudStackVolumeRequest.setVolumeInfo(volumeObject);
+                break;
+            case ISCSI:
+                cloudStackVolumeRequest = new CloudStackVolume();
+                Lun lunRequest = new Lun();
+                Svm svm = new Svm();
+                svm.setName(details.get(Constants.SVM_NAME));
+                lunRequest.setSvm(svm);
+                LunSpace lunSpace = new LunSpace();
+                lunSpace.setSize(volumeObject.getSize());
+                lunRequest.setSpace(lunSpace);
+                // ONTAP LUN names are full paths, e.g. "/vol/VolumeName/LunName".
+                String lunFullName = Constants.VOLUME_PATH_PREFIX + storagePool.getName() + Constants.SLASH + volumeObject.getName();
+                lunRequest.setName(lunFullName);
+                // Map hypervisor type to the ONTAP LUN os_type enum constant directly;
+                // round-tripping through getValue()/valueOf() would throw because
+                // valueOf() expects the constant name, not its serialized value.
+                String hypervisorType = storagePool.getHypervisor().name();
+                Lun.OsTypeEnum osType;
+                switch (hypervisorType) {
+                    case Constants.KVM:
+                        osType = Lun.OsTypeEnum.LINUX;
+                        break;
+                    default:
+                        String errMsg = "createCloudStackVolumeRequestByProtocol : Unsupported hypervisor type " + hypervisorType + " for ONTAP storage";
+                        s_logger.error(errMsg);
+                        throw new CloudRuntimeException(errMsg);
+                }
+                lunRequest.setOsType(osType);
+                cloudStackVolumeRequest.setLun(lunRequest);
+                break;
+            default:
+                throw new CloudRuntimeException("createCloudStackVolumeRequestByProtocol: Unsupported protocol " + protocol);
+        }
+        return cloudStackVolumeRequest;
+    }
+
public static String getOSTypeFromHypervisor(String hypervisorType){
switch (hypervisorType) {
case Constants.KVM: