diff --git a/plugins/storage/volume/ontap/pom.xml b/plugins/storage/volume/ontap/pom.xml
index 10ca7935f408..3628f6f3f592 100644
--- a/plugins/storage/volume/ontap/pom.xml
+++ b/plugins/storage/volume/ontap/pom.xml
@@ -24,14 +24,14 @@
org.apache.cloudstack
cloudstack-plugins
- 4.22.0.0-SNAPSHOT
+ 4.23.0.0-SNAPSHOT
../../../pom.xml
2021.0.7
11.0
20230227
- 2.15.2
+ 2.13.4
4.5.14
1.6.2
3.8.1
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java
index e2eb6220230a..7ddce42991f5 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java
@@ -18,11 +18,8 @@
*/
package org.apache.cloudstack.storage.driver;
-import com.cloud.agent.api.Answer;
-import com.cloud.agent.api.to.DataObjectType;
import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.DataTO;
-import com.cloud.exception.InvalidParameterValueException;
import com.cloud.host.Host;
import com.cloud.storage.Storage;
import com.cloud.storage.StoragePool;
@@ -43,14 +40,11 @@
import org.apache.cloudstack.storage.command.CommandResult;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
-import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.feign.model.OntapStorage;
import org.apache.cloudstack.storage.provider.StorageProviderFactory;
import org.apache.cloudstack.storage.service.StorageStrategy;
-import org.apache.cloudstack.storage.service.model.CloudStackVolume;
import org.apache.cloudstack.storage.service.model.ProtocolType;
import org.apache.cloudstack.storage.utils.Constants;
-import org.apache.cloudstack.storage.utils.Utility;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -87,57 +81,7 @@ public DataStoreTO getStoreTO(DataStore store) {
@Override
public void createAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback callback) {
- CreateCmdResult createCmdResult = null;
- String path = null;
- String errMsg = null;
- if (dataStore == null) {
- throw new InvalidParameterValueException("createAsync: dataStore should not be null");
- }
- if (dataObject == null) {
- throw new InvalidParameterValueException("createAsync: dataObject should not be null");
- }
- if (callback == null) {
- throw new InvalidParameterValueException("createAsync: callback should not be null");
- }
- try {
- s_logger.info("createAsync: Started for data store [{}] and data object [{}] of type [{}]",
- dataStore, dataObject, dataObject.getType());
- if (dataObject.getType() == DataObjectType.VOLUME) {
- path = createCloudStackVolumeForTypeVolume(dataStore, dataObject);
- createCmdResult = new CreateCmdResult(path, new Answer(null, true, null));
- } else {
- errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to createAsync";
- s_logger.error(errMsg);
- throw new CloudRuntimeException(errMsg);
- }
- } catch (Exception e) {
- errMsg = e.getMessage();
- s_logger.error("createAsync: Failed for dataObject [{}]: {}", dataObject, errMsg);
- createCmdResult = new CreateCmdResult(null, new Answer(null, false, errMsg));
- createCmdResult.setResult(e.toString());
- } finally {
- callback.complete(createCmdResult);
- }
- }
- private String createCloudStackVolumeForTypeVolume(DataStore dataStore, DataObject dataObject) {
- StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId());
- if(storagePool == null) {
- s_logger.error("createCloudStackVolume : Storage Pool not found for id: " + dataStore.getId());
- throw new CloudRuntimeException("createCloudStackVolume : Storage Pool not found for id: " + dataStore.getId());
- }
- Map details = storagePoolDetailsDao.listDetailsKeyPairs(dataStore.getId());
- StorageStrategy storageStrategy = getStrategyByStoragePoolDetails(details);
- s_logger.info("createCloudStackVolumeForTypeVolume: Connection to Ontap SVM [{}] successful, preparing CloudStackVolumeRequest", details.get(Constants.SVM_NAME));
- CloudStackVolume cloudStackVolumeRequest = Utility.createCloudStackVolumeRequestByProtocol(storagePool, details, dataObject);
- CloudStackVolume cloudStackVolume = storageStrategy.createCloudStackVolume(cloudStackVolumeRequest);
- if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL)) && cloudStackVolume.getLun() != null && cloudStackVolume.getLun().getName() != null) {
- return cloudStackVolume.getLun().getName();
- } else {
- String errMsg = "createCloudStackVolumeForTypeVolume: Volume creation failed. Lun or Lun Path is null for dataObject: " + dataObject;
- s_logger.error(errMsg);
- throw new CloudRuntimeException(errMsg);
- }
}
@Override
@@ -277,7 +221,7 @@ private StorageStrategy getStrategyByStoragePoolDetails(Map deta
}
String protocol = details.get(Constants.PROTOCOL);
OntapStorage ontapStorage = new OntapStorage(details.get(Constants.USERNAME), details.get(Constants.PASSWORD),
- details.get(Constants.MANAGEMENT_LIF), details.get(Constants.SVM_NAME), ProtocolType.valueOf(protocol),
+ details.get(Constants.MANAGEMENT_LIF), details.get(Constants.SVM_NAME), Long.parseLong(details.get(Constants.SIZE)), ProtocolType.valueOf(protocol),
Boolean.parseBoolean(details.get(Constants.IS_DISAGGREGATED)));
StorageStrategy storageStrategy = StorageProviderFactory.getStrategy(ontapStorage);
boolean isValid = storageStrategy.connect();
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java
index ce2783add228..79d28a6075be 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java
@@ -1,5 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
package org.apache.cloudstack.storage.feign;
+import com.fasterxml.jackson.databind.ObjectMapper;
import feign.RequestInterceptor;
import feign.Retryer;
import feign.Client;
@@ -11,7 +31,6 @@
import feign.codec.EncodeException;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.DeserializationFeature;
-import com.fasterxml.jackson.databind.json.JsonMapper;
import org.apache.http.conn.ConnectionKeepAliveStrategy;
import org.apache.http.conn.ssl.NoopHostnameVerifier;
import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
@@ -36,13 +55,11 @@ public class FeignConfiguration {
private final int retryMaxInterval = 5;
private final String ontapFeignMaxConnection = "80";
private final String ontapFeignMaxConnectionPerRoute = "20";
- private final JsonMapper jsonMapper;
+ private final ObjectMapper objectMapper;
public FeignConfiguration() {
- this.jsonMapper = JsonMapper.builder()
- .disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES)
- .findAndAddModules()
- .build();
+ this.objectMapper = new ObjectMapper();
+ this.objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
}
public Client createClient() {
@@ -105,7 +122,7 @@ public void encode(Object object, Type bodyType, feign.RequestTemplate template)
return;
}
try {
- byte[] jsonBytes = jsonMapper.writeValueAsBytes(object);
+ byte[] jsonBytes = objectMapper.writeValueAsBytes(object);
template.body(jsonBytes, StandardCharsets.UTF_8);
template.header("Content-Type", "application/json");
} catch (JsonProcessingException e) {
@@ -126,7 +143,7 @@ public Object decode(Response response, Type type) throws IOException, DecodeExc
try (InputStream bodyStream = response.body().asInputStream()) {
json = new String(bodyStream.readAllBytes(), StandardCharsets.UTF_8);
logger.debug("Decoding JSON response: {}", json);
- return jsonMapper.readValue(json, jsonMapper.getTypeFactory().constructType(type));
+ return objectMapper.readValue(json, objectMapper.getTypeFactory().constructType(type));
} catch (IOException e) {
logger.error("Error decoding JSON response. Status: {}, Raw body: {}", response.status(), json, e);
throw new DecodeException(response.status(), "Error decoding JSON response", response.request(), e);
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NetworkFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NetworkFeignClient.java
new file mode 100644
index 000000000000..4acbbecf6573
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NetworkFeignClient.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign.client;
+
+import feign.Headers;
+import feign.Param;
+import feign.QueryMap;
+import feign.RequestLine;
+import org.apache.cloudstack.storage.feign.model.IpInterface;
+import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
+
+import java.util.Map;
+
+public interface NetworkFeignClient {
+ @RequestLine("GET /api/network/ip/interfaces")
+ @Headers({"Authorization: {authHeader}"})
+ OntapResponse getNetworkIpInterfaces(@Param("authHeader") String authHeader, @QueryMap Map queryParams);
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SANFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SANFeignClient.java
index dd2463d7f3bb..868aab293518 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SANFeignClient.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SANFeignClient.java
@@ -18,28 +18,32 @@
*/
package org.apache.cloudstack.storage.feign.client;
+import feign.QueryMap;
import org.apache.cloudstack.storage.feign.model.Igroup;
+import org.apache.cloudstack.storage.feign.model.IscsiService;
import org.apache.cloudstack.storage.feign.model.Lun;
import org.apache.cloudstack.storage.feign.model.LunMap;
import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
import feign.Headers;
import feign.Param;
import feign.RequestLine;
-import java.net.URI;
+import java.util.Map;
//TODO: Proper URLs should be added in the RequestLine annotations below
public interface SANFeignClient {
+ // iSCSI Service APIs
+ @RequestLine("GET /api/protocols/san/iscsi/services")
+ @Headers({"Authorization: {authHeader}"})
+ OntapResponse getIscsiServices(@Param("authHeader") String authHeader, @QueryMap Map queryMap);
// LUN Operation APIs
- @RequestLine("POST /")
- @Headers({"Authorization: {authHeader}", "return_records: {returnRecords}"})
- OntapResponse createLun(@Param("authHeader") String authHeader,
- @Param("returnRecords") boolean returnRecords,
- Lun lun);
+ @RequestLine("POST /api/storage/luns?return_records={returnRecords}")
+ @Headers({"Authorization: {authHeader}"})
+ OntapResponse createLun(@Param("authHeader") String authHeader, @Param("returnRecords") boolean returnRecords, Lun lun);
- @RequestLine("GET /")
+ @RequestLine("GET /api/storage/luns")
@Headers({"Authorization: {authHeader}"})
- OntapResponse getLunResponse(@Param("authHeader") String authHeader);
+ OntapResponse getLunResponse(@Param("authHeader") String authHeader, @QueryMap Map queryMap);
@RequestLine("GET /{uuid}")
@Headers({"Authorization: {authHeader}"})
@@ -54,36 +58,35 @@ OntapResponse createLun(@Param("authHeader") String authHeader,
void deleteLun(@Param("authHeader") String authHeader, @Param("uuid") String uuid);
// iGroup Operation APIs
- @RequestLine("POST /")
- @Headers({"Authorization: {authHeader}", "return_records: {returnRecords}"})
- OntapResponse createIgroup(@Param("authHeader") String authHeader,
- @Param("returnRecords") boolean returnRecords,
- Igroup igroupRequest);
+ @RequestLine("POST /api/protocols/san/igroups?return_records={returnRecords}")
+ @Headers({"Authorization: {authHeader}"})
+ OntapResponse createIgroup(@Param("authHeader") String authHeader, @Param("returnRecords") boolean returnRecords, Igroup igroupRequest);
- @RequestLine("GET /")
- @Headers({"Authorization: {authHeader}"}) // TODO: Check this again, uuid should be part of the path?
- OntapResponse getIgroupResponse(@Param("authHeader") String authHeader, @Param("uuid") String uuid);
+ @RequestLine("GET /api/protocols/san/igroups")
+ @Headers({"Authorization: {authHeader}"})
+ OntapResponse getIgroupResponse(@Param("authHeader") String authHeader, @QueryMap Map queryMap);
@RequestLine("GET /{uuid}")
@Headers({"Authorization: {authHeader}"})
Igroup getIgroupByUUID(@Param("authHeader") String authHeader, @Param("uuid") String uuid);
- @RequestLine("DELETE /{uuid}")
+ @RequestLine("DELETE /api/protocols/san/igroups/{uuid}")
@Headers({"Authorization: {authHeader}"})
- void deleteIgroup(@Param("baseUri") URI baseUri, @Param("authHeader") String authHeader, @Param("uuid") String uuid);
+ void deleteIgroup(@Param("authHeader") String authHeader, @Param("uuid") String uuid);
// LUN Maps Operation APIs
- @RequestLine("POST /")
- @Headers({"Authorization: {authHeader}"})
- OntapResponse createLunMap(@Param("authHeader") String authHeader, LunMap lunMap);
+    @RequestLine("POST /api/protocols/san/lun-maps?return_records={returnRecords}")
+    @Headers({"Authorization: {authHeader}"})
+ OntapResponse createLunMap(@Param("authHeader") String authHeader, @Param("returnRecords") boolean returnRecords, LunMap lunMap);
+
- @RequestLine("GET /")
+ @RequestLine("GET /api/protocols/san/lun-maps")
@Headers({"Authorization: {authHeader}"})
- OntapResponse getLunMapResponse(@Param("authHeader") String authHeader);
+ OntapResponse getLunMapResponse(@Param("authHeader") String authHeader, @QueryMap Map queryMap);
- @RequestLine("DELETE /{lunUuid}/{igroupUuid}")
+ @RequestLine("DELETE /api/protocols/san/lun-maps/{lunUuid}/{igroupUuid}")
@Headers({"Authorization: {authHeader}"})
void deleteLunMap(@Param("authHeader") String authHeader,
- @Param("lunUuid") String lunUuid,
- @Param("igroupUuid") String igroupUuid);
+ @Param("lunUuid") String lunUUID,
+ @Param("igroupUuid") String igroupUUID);
}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java
index 9a2c76639221..717409664662 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java
@@ -18,22 +18,30 @@
*/
package org.apache.cloudstack.storage.feign.client;
+import feign.QueryMap;
import org.apache.cloudstack.storage.feign.model.Volume;
import org.apache.cloudstack.storage.feign.model.response.JobResponse;
import feign.Headers;
import feign.Param;
import feign.RequestLine;
+import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
+
+import java.util.Map;
public interface VolumeFeignClient {
@RequestLine("DELETE /api/storage/volumes/{uuid}")
@Headers({"Authorization: {authHeader}"})
- void deleteVolume(@Param("authHeader") String authHeader, @Param("uuid") String uuid);
+ JobResponse deleteVolume(@Param("authHeader") String authHeader, @Param("uuid") String uuid);
@RequestLine("POST /api/storage/volumes")
@Headers({"Authorization: {authHeader}"})
JobResponse createVolumeWithJob(@Param("authHeader") String authHeader, Volume volumeRequest);
+ @RequestLine("GET /api/storage/volumes")
+ @Headers({"Authorization: {authHeader}"})
+ OntapResponse getAllVolumes(@Param("authHeader") String authHeader, @QueryMap Map queryParams);
+
@RequestLine("GET /api/storage/volumes/{uuid}")
@Headers({"Authorization: {authHeader}"})
Volume getVolumeByUUID(@Param("authHeader") String authHeader, @Param("uuid") String uuid);
@@ -42,4 +50,3 @@ public interface VolumeFeignClient {
@Headers({"Accept: {acceptHeader}", "Authorization: {authHeader}"})
JobResponse updateVolumeRebalancing(@Param("acceptHeader") String acceptHeader, @Param("uuid") String uuid, Volume volumeRequest);
}
-
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Aggregate.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Aggregate.java
index c91f0f87eb27..8ac1717604a5 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Aggregate.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Aggregate.java
@@ -22,12 +22,43 @@
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonValue;
import java.util.Objects;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class Aggregate {
+    // Allowed aggregate "state" values reported by ONTAP; fromValue maps the raw string to an enum (null if unrecognized)
+ public enum StateEnum {
+ ONLINE("online");
+ private final String value;
+
+ StateEnum(String value) {
+ this.value = value;
+ }
+
+ @JsonValue
+ public String getValue() {
+ return value;
+ }
+
+ @Override
+ public String toString() {
+ return String.valueOf(value);
+ }
+
+        @JsonCreator
+        public static StateEnum fromValue(String text) {
+            for (StateEnum b : StateEnum.values()) {
+                if (b.value.equalsIgnoreCase(text)) {
+                    return b;
+                }
+            }
+            return null;
+        }
+ }
@JsonProperty("name")
private String name = null;
@@ -40,6 +71,13 @@ public int hashCode() {
@JsonProperty("uuid")
private String uuid = null;
+ @JsonProperty("state")
+ private StateEnum state = null;
+
+ @JsonProperty("space")
+ private AggregateSpace space = null;
+
+
public Aggregate name(String name) {
this.name = name;
return this;
@@ -65,6 +103,21 @@ public void setUuid(String uuid) {
this.uuid = uuid;
}
+ public StateEnum getState() {
+ return state;
+ }
+
+ public AggregateSpace getSpace() {
+ return space;
+ }
+
+ public Double getAvailableBlockStorageSpace() {
+ if (space != null && space.blockStorage != null) {
+ return space.blockStorage.available;
+ }
+ return null;
+ }
+
@Override
public boolean equals(java.lang.Object o) {
@@ -95,4 +148,18 @@ public String toString() {
return "DiskAggregates [name=" + name + ", uuid=" + uuid + "]";
}
+ public static class AggregateSpace {
+ @JsonProperty("block_storage")
+ private AggregateSpaceBlockStorage blockStorage = null;
+ }
+
+ public static class AggregateSpaceBlockStorage {
+ @JsonProperty("available")
+ private Double available = null;
+ @JsonProperty("size")
+ private Double size = null;
+ @JsonProperty("used")
+ private Double used = null;
+ }
+
}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/IpInterface.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/IpInterface.java
new file mode 100644
index 000000000000..c15798a42b70
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/IpInterface.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import java.util.List;
+import java.util.Objects;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class IpInterface {
+ @JsonProperty("uuid")
+ private String uuid;
+
+ @JsonProperty("name")
+ private String name;
+
+ @JsonProperty("ip")
+ private IpInfo ip;
+
+ @JsonProperty("svm")
+ private Svm svm;
+
+ @JsonProperty("services")
+ private List services;
+
+ // Getters and setters
+ public String getUuid() {
+ return uuid;
+ }
+
+ public void setUuid(String uuid) {
+ this.uuid = uuid;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ public IpInfo getIp() {
+ return ip;
+ }
+
+ public void setIp(IpInfo ip) {
+ this.ip = ip;
+ }
+
+ public Svm getSvm() {
+ return svm;
+ }
+
+ public void setSvm(Svm svm) {
+ this.svm = svm;
+ }
+
+ public List getServices() {
+ return services;
+ }
+
+ public void setServices(List services) {
+ this.services = services;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ IpInterface that = (IpInterface) o;
+ return Objects.equals(uuid, that.uuid) &&
+ Objects.equals(name, that.name) &&
+ Objects.equals(ip, that.ip) &&
+ Objects.equals(svm, that.svm) &&
+ Objects.equals(services, that.services);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(uuid, name, ip, svm, services);
+ }
+
+ @Override
+ public String toString() {
+ return "IpInterface{" +
+ "uuid='" + uuid + '\'' +
+ ", name='" + name + '\'' +
+ ", ip=" + ip +
+ ", svm=" + svm +
+ ", services=" + services +
+ '}';
+ }
+
+ // Nested class for IP information
+ @JsonIgnoreProperties(ignoreUnknown = true)
+ @JsonInclude(JsonInclude.Include.NON_NULL)
+ public static class IpInfo {
+ @JsonProperty("address")
+ private String address;
+
+ public String getAddress() {
+ return address;
+ }
+
+ public void setAddress(String address) {
+ this.address = address;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ IpInfo ipInfo = (IpInfo) o;
+ return Objects.equals(address, ipInfo.address);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(address);
+ }
+
+ @Override
+ public String toString() {
+ return "IpInfo{" +
+ "address='" + address + '\'' +
+ '}';
+ }
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/IscsiService.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/IscsiService.java
new file mode 100644
index 000000000000..06d1ca92735f
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/IscsiService.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+
+/**
+ * An iSCSI service defines the properties of the iSCSI target for an SVM.
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class IscsiService {
+ @JsonProperty("enabled")
+ private Boolean enabled = null;
+
+ @JsonProperty("svm")
+ private Svm svm = null;
+
+ @JsonProperty("target")
+ private IscsiServiceTarget target = null;
+
+ public Boolean isEnabled() {
+ return enabled;
+ }
+
+ public void setEnabled(Boolean enabled) {
+ this.enabled = enabled;
+ }
+
+ public Svm getSvm() {
+ return svm;
+ }
+
+ public void setSvm(Svm svm) {
+ this.svm = svm;
+ }
+
+ public IscsiServiceTarget getTarget() {
+ return target;
+ }
+
+ public void setTarget(IscsiServiceTarget target) {
+ this.target = target;
+ }
+
+ @Override
+ public String toString() {
+ return "IscsiService{" +
+ "enabled=" + enabled +
+ ", svm=" + svm +
+ ", target=" + target +
+ '}';
+ }
+
+ /**
+ * iSCSI target information
+ */
+ @JsonIgnoreProperties(ignoreUnknown = true)
+ @JsonInclude(JsonInclude.Include.NON_NULL)
+ public static class IscsiServiceTarget {
+ @JsonProperty("alias")
+ private String alias = null;
+
+ @JsonProperty("name")
+ private String name = null;
+
+ public String getAlias() {
+ return alias;
+ }
+
+ public void setAlias(String alias) {
+ this.alias = alias;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ @Override
+ public String toString() {
+ return "IscsiServiceTarget{" +
+ "alias='" + alias + '\'' +
+ ", name='" + name + '\'' +
+ '}';
+ }
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Job.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Job.java
index 04b5611a8dab..cdeaf2ed8388 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Job.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Job.java
@@ -87,14 +87,14 @@ public String toString() {
}
public static class Links {
- @JsonProperty("message")
+ @JsonProperty("self")
private Self self;
public Self getSelf() { return self; }
public void setSelf(Self self) { this.self = self; }
}
public static class Self {
- @JsonProperty("message")
+ @JsonProperty("href")
private String href;
public String getHref() { return href; }
public void setHref(String href) { this.href = href; }
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/OntapStorage.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/OntapStorage.java
index eb56b4a5d5e5..8b450331b50a 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/OntapStorage.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/OntapStorage.java
@@ -26,14 +26,16 @@ public class OntapStorage {
private final String password;
private final String managementLIF;
private final String svmName;
+ private final Long size;
private final ProtocolType protocolType;
private final Boolean isDisaggregated;
- public OntapStorage(String username, String password, String managementLIF, String svmName, ProtocolType protocolType, Boolean isDisaggregated) {
+ public OntapStorage(String username, String password, String managementLIF, String svmName, Long size, ProtocolType protocolType, Boolean isDisaggregated) {
this.username = username;
this.password = password;
this.managementLIF = managementLIF;
this.svmName = svmName;
+ this.size = size;
this.protocolType = protocolType;
this.isDisaggregated = isDisaggregated;
}
@@ -54,6 +56,10 @@ public String getSvmName() {
return svmName;
}
+ public Long getSize() {
+ return size;
+ }
+
public ProtocolType getProtocol() {
return protocolType;
}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Svm.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Svm.java
index f1a226739365..65821739f1b2 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Svm.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Svm.java
@@ -144,4 +144,4 @@ public int hashCode() {
@JsonInclude(JsonInclude.Include.NON_NULL)
public static class Links { }
-}
\ No newline at end of file
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java
index 01b013f606dd..d12d6838ccb5 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java
@@ -23,12 +23,14 @@
import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.dao.ClusterDao;
+import com.cloud.exception.InvalidParameterValueException;
import com.cloud.host.HostVO;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.resource.ResourceManager;
import com.cloud.storage.Storage;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
+import com.cloud.storage.StoragePoolAutomation;
import com.cloud.utils.exception.CloudRuntimeException;
import com.google.common.base.Preconditions;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
@@ -38,12 +40,18 @@
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDetailsDao;
import org.apache.cloudstack.storage.datastore.lifecycle.BasePrimaryDataStoreLifeCycleImpl;
import org.apache.cloudstack.storage.feign.model.OntapStorage;
import org.apache.cloudstack.storage.provider.StorageProviderFactory;
import org.apache.cloudstack.storage.service.StorageStrategy;
+import org.apache.cloudstack.storage.service.model.AccessGroup;
import org.apache.cloudstack.storage.service.model.ProtocolType;
import org.apache.cloudstack.storage.utils.Constants;
+import org.apache.cloudstack.storage.utils.Utility;
import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -59,6 +67,10 @@ public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycl
@Inject private StorageManager _storageMgr;
@Inject private ResourceManager _resourceMgr;
@Inject private PrimaryDataStoreHelper _dataStoreHelper;
+ @Inject private PrimaryDataStoreDao storagePoolDao;
+ @Inject private StoragePoolDetailsDao storagePoolDetailsDao;
+ @Inject private PrimaryDataStoreDetailsDao _datastoreDetailsDao;
+ @Inject private StoragePoolAutomation _storagePoolAutomation;
private static final Logger s_logger = LogManager.getLogger(OntapPrimaryDatastoreLifecycle.class);
// ONTAP minimum volume size is 1.56 GB (1677721600 bytes)
@@ -81,6 +93,7 @@ public DataStore initialize(Map dsInfos) {
String storagePoolName = (String) dsInfos.get("name");
String providerName = (String) dsInfos.get("providerName");
Long capacityBytes = (Long) dsInfos.get("capacityBytes");
+ boolean managed = (boolean) dsInfos.get("managed");
String tags = (String) dsInfos.get("tags");
Boolean isTagARule = (Boolean) dsInfos.get("isTagARule");
@@ -132,6 +145,11 @@ public DataStore initialize(Map dsInfos) {
parameters.setHypervisorType(clusterVO.getHypervisorType());
}
+ s_logger.debug("ONTAP primary storage will be created as " + (managed ? "managed" : "unmanaged"));
+ if (!managed) {
+ throw new CloudRuntimeException("ONTAP primary storage must be managed");
+ }
+
// Required ONTAP detail keys
Set requiredKeys = Set.of(
Constants.USERNAME,
@@ -180,50 +198,59 @@ public DataStore initialize(Map dsInfos) {
// Default for IS_DISAGGREGATED if needed
details.putIfAbsent(Constants.IS_DISAGGREGATED, "false");
- // Determine storage pool type and path based on protocol
- String path;
ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL));
- switch (protocol) {
- case NFS3:
- parameters.setType(Storage.StoragePoolType.NetworkFilesystem);
- path = details.get(Constants.MANAGEMENT_LIF) + ":/" + storagePoolName;
- s_logger.info("Setting NFS path for storage pool: " + path);
- break;
- case ISCSI:
- parameters.setType(Storage.StoragePoolType.Iscsi);
- path = "iqn.1992-08.com.netapp:" + details.get(Constants.SVM_NAME) + "." + storagePoolName;
- s_logger.info("Setting iSCSI path for storage pool: " + path);
- break;
- default:
- throw new CloudRuntimeException("Unsupported protocol: " + protocol + ", cannot create primary storage");
- }
// Connect to ONTAP and create volume
+ long volumeSize = Long.parseLong(details.get(Constants.SIZE));
OntapStorage ontapStorage = new OntapStorage(
details.get(Constants.USERNAME),
details.get(Constants.PASSWORD),
details.get(Constants.MANAGEMENT_LIF),
details.get(Constants.SVM_NAME),
+ volumeSize,
protocol,
Boolean.parseBoolean(details.get(Constants.IS_DISAGGREGATED).toLowerCase()));
StorageStrategy storageStrategy = StorageProviderFactory.getStrategy(ontapStorage);
boolean isValid = storageStrategy.connect();
if (isValid) {
- long volumeSize = Long.parseLong(details.get(Constants.SIZE));
- s_logger.info("Creating ONTAP volume '" + storagePoolName + "' with size: " + volumeSize + " bytes (" +
- (volumeSize / (1024 * 1024 * 1024)) + " GB)");
- storageStrategy.createStorageVolume(storagePoolName, volumeSize);
+ // Get the DataLIF for data access
+ String dataLIF = storageStrategy.getNetworkInterface();
+ if (dataLIF == null || dataLIF.isEmpty()) {
+ throw new CloudRuntimeException("Failed to retrieve Data LIF from ONTAP, cannot create primary storage");
+ }
+ s_logger.info("Using Data LIF for storage access: " + dataLIF);
+ details.put(Constants.DATA_LIF, dataLIF);
} else {
throw new CloudRuntimeException("ONTAP details validation failed, cannot create primary storage");
}
+ // Determine storage pool type, path and port based on protocol
+ String path;
+ int port;
+ switch (protocol) {
+ case NFS3:
+ parameters.setType(Storage.StoragePoolType.NetworkFilesystem);
+ path = Constants.SLASH + storagePoolName;
+ port = 2049;
+ s_logger.info("Setting NFS path for storage pool: " + path + ", port: " + port);
+ break;
+ case ISCSI:
+ parameters.setType(Storage.StoragePoolType.Iscsi);
+ path = storageStrategy.getStoragePath();
+ port = 3260;
+ s_logger.info("Setting iSCSI path for storage pool: " + path + ", port: " + port);
+ break;
+ default:
+ throw new CloudRuntimeException("Unsupported protocol: " + protocol + ", cannot create primary storage");
+ }
+
// Set parameters for primary data store
- parameters.setHost(details.get(Constants.MANAGEMENT_LIF));
- parameters.setPort(Constants.ONTAP_PORT);
+ parameters.setHost(details.get(Constants.DATA_LIF));
+ parameters.setPort(port);
parameters.setPath(path);
- parameters.setTags(tags != null ? tags : "");
- parameters.setIsTagARule(isTagARule != null ? isTagARule : Boolean.FALSE);
+ parameters.setTags(tags);
+ parameters.setIsTagARule(isTagARule);
parameters.setDetails(details);
parameters.setUuid(UUID.randomUUID().toString());
parameters.setZoneId(zoneId);
@@ -231,7 +258,7 @@ public DataStore initialize(Map dsInfos) {
parameters.setClusterId(clusterId);
parameters.setName(storagePoolName);
parameters.setProviderName(providerName);
- parameters.setManaged(true); // ONTAP storage is always managed
+ parameters.setManaged(managed);
parameters.setCapacityBytes(capacityBytes);
parameters.setUsedBytes(0);
@@ -241,16 +268,46 @@ public DataStore initialize(Map dsInfos) {
@Override
public boolean attachCluster(DataStore dataStore, ClusterScope scope) {
logger.debug("In attachCluster for ONTAP primary storage");
- PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo)dataStore;
- List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primarystore);
+ if (dataStore == null) {
+ throw new InvalidParameterValueException("attachCluster: dataStore should not be null");
+ }
+ if (scope == null) {
+ throw new InvalidParameterValueException("attachCluster: scope should not be null");
+ }
- logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primarystore.getClusterId()));
+ StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId());
+ if (storagePool == null) {
+ s_logger.error("attachCluster : Storage Pool not found for id: " + dataStore.getId());
+ throw new CloudRuntimeException("attachCluster : Storage Pool not found for id: " + dataStore.getId());
+ }
+ s_logger.info("Found the Storage Pool: " + storagePool.getName() + " for id: " + dataStore.getId());
+ PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)dataStore;
+ List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primaryStore);
+ // TODO- need to check if no host to connect then throw exception or just continue
+ logger.debug("attachCluster: Eligible Up and Enabled hosts: {} in cluster {}", hostsToConnect, primaryStore.getClusterId());
+
+ Map details = storagePoolDetailsDao.listDetailsKeyPairs(storagePool.getId());
+ primaryStore.setDetails(details);
+ StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details);
+ //TODO - check if no host to connect then also need to create access group without initiators
+ try {
+ AccessGroup accessGroupRequest = new AccessGroup();
+ accessGroupRequest.setHostsToConnect(hostsToConnect);
+ accessGroupRequest.setScope(scope);
+ accessGroupRequest.setPrimaryDataStoreInfo(primaryStore);
+ logger.info("attachCluster: Creating access group on storage system for cluster");
+ strategy.createAccessGroup(accessGroupRequest);
+ } catch (Exception e) {
+ throw new CloudRuntimeException("attachCluster: Failed to create access group on storage system for cluster. Exception: " + e.getMessage());
+ }
+ logger.debug("attachCluster: Attaching the pool to each of the host in the cluster");
for (HostVO host : hostsToConnect) {
- // TODO: Fetch the host IQN and add to the initiator group on ONTAP cluster
try {
- _storageMgr.connectHostToSharedPool(host, dataStore.getId());
+ _storageMgr.connectHostToSharedPool(host, primaryStore.getId());
+ logger.debug("attachCluster: Successfully established a connection between host {} and storage pool {}", host.getId(), primaryStore.getId());
} catch (Exception e) {
- logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e);
+ logger.warn("attachCluster: Unable to establish a connection between " + host + " and " + primaryStore, e);
+ return false;
}
}
_dataStoreHelper.attachCluster(dataStore);
@@ -265,15 +322,44 @@ public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo exis
@Override
public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.HypervisorType hypervisorType) {
logger.debug("In attachZone for ONTAP primary storage");
+ if (dataStore == null) {
+ throw new InvalidParameterValueException("attachZone: dataStore should not be null");
+ }
+ if (scope == null) {
+ throw new InvalidParameterValueException("attachZone: scope should not be null");
+ }
+// List hostsIdentifier = new ArrayList<>();
+ StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId());
+ if (storagePool == null) {
+ s_logger.error("attachZone : Storage Pool not found for id: " + dataStore.getId());
+ throw new CloudRuntimeException("attachZone : Storage Pool not found for id: " + dataStore.getId());
+ }
+ PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)dataStore;
List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), Hypervisor.HypervisorType.KVM);
-
- logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", hostsToConnect));
+ // TODO- need to check if no host to connect then throw exception or just continue
+ logger.debug("attachZone: Eligible Up and Enabled hosts: {}", hostsToConnect);
+
+ Map details = storagePoolDetailsDao.listDetailsKeyPairs(dataStore.getId());
+ primaryStore.setDetails(details);
+ StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details);
+ try {
+ AccessGroup accessGroupRequest = new AccessGroup();
+ accessGroupRequest.setHostsToConnect(hostsToConnect);
+ accessGroupRequest.setScope(scope);
+ accessGroupRequest.setPrimaryDataStoreInfo(primaryStore);
+ logger.info("attachZone: Creating access group on storage system for zone");
+ strategy.createAccessGroup(accessGroupRequest);
+ } catch (Exception e) {
+ throw new CloudRuntimeException("attachZone: Failed to create access group on storage system for zone with Exception: " + e.getMessage());
+ }
+ logger.debug("attachZone: Attaching the pool to each of the hosts in the zone");
for (HostVO host : hostsToConnect) {
- // TODO: Fetch the host IQN and add to the initiator group on ONTAP cluster
try {
_storageMgr.connectHostToSharedPool(host, dataStore.getId());
+ logger.debug("attachZone: Successfully established a connection between host {} and storage pool {}", host.getId(), primaryStore.getId());
} catch (Exception e) {
logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e);
+ return false;
}
}
_dataStoreHelper.attachZone(dataStore);
@@ -282,19 +368,66 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper
@Override
public boolean maintain(DataStore store) {
- return true;
+ _storagePoolAutomation.maintain(store);
+ return _dataStoreHelper.maintain(store);
}
@Override
public boolean cancelMaintain(DataStore store) {
- return true;
+ _storagePoolAutomation.cancelMaintain(store);
+ return _dataStoreHelper.cancelMaintain(store);
}
@Override
public boolean deleteDataStore(DataStore store) {
- return true;
+ s_logger.info("deleteDataStore: Starting deletion process for storage pool id: {}", store.getId());
+
+ long storagePoolId = store.getId();
+ // Get the StoragePool details
+ StoragePool storagePool = _storageMgr.getStoragePool(storagePoolId);
+ if (storagePool == null) {
+ s_logger.warn("deleteDataStore: Storage pool not found for id: {}, skipping deletion", storagePoolId);
+ return true; // Return true since the entity doesn't exist
+ }
+
+ try {
+ // Fetch storage pool details
+ Map details = _datastoreDetailsDao.listDetailsKeyPairs(storagePoolId);
+ if (details == null || details.isEmpty()) {
+ s_logger.warn("deleteDataStore: No details found for storage pool id: {}, proceeding with CS entity deletion only", storagePoolId);
+ return _dataStoreHelper.deletePrimaryDataStore(store);
+ }
+
+ s_logger.info("deleteDataStore: Deleting access groups for storage pool '{}'", storagePool.getName());
+
+ // Get the storage strategy to interact with ONTAP
+ StorageStrategy storageStrategy = Utility.getStrategyByStoragePoolDetails(details);
+
+ // Cast DataStore to PrimaryDataStoreInfo to get full details
+ PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo) store;
+ primaryDataStoreInfo.setDetails(details);
+
+ // Create AccessGroup object with PrimaryDataStoreInfo
+ AccessGroup accessGroup = new AccessGroup();
+ accessGroup.setPrimaryDataStoreInfo(primaryDataStoreInfo);
+
+ // Call deleteAccessGroup - it will figure out scope, protocol, and all details internally
+ storageStrategy.deleteAccessGroup(accessGroup);
+
+ s_logger.info("deleteDataStore: Successfully deleted access groups for storage pool '{}'", storagePool.getName());
+
+ } catch (Exception e) {
+ s_logger.error("deleteDataStore: Failed to delete access groups for storage pool id: {}. Error: {}",
+ storagePoolId, e.getMessage(), e);
+ // Continue with CloudStack entity deletion even if ONTAP cleanup fails
+ s_logger.warn("deleteDataStore: Proceeding with CloudStack entity deletion despite ONTAP cleanup failure");
+ }
+
+ // Delete the CloudStack primary data store entity
+ return _dataStoreHelper.deletePrimaryDataStore(store);
}
+
@Override
public boolean migrateToObjectStore(DataStore store) {
return true;
@@ -307,12 +440,12 @@ public void updateStoragePool(StoragePool storagePool, Map detai
@Override
public void enableStoragePool(DataStore store) {
-
+ _dataStoreHelper.enable(store);
}
@Override
public void disableStoragePool(DataStore store) {
-
+ _dataStoreHelper.disable(store);
}
@Override
@@ -325,4 +458,3 @@ public void changeStoragePoolScopeToCluster(DataStore store, ClusterScope cluste
}
}
-
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java
new file mode 100644
index 000000000000..8812eef3a95f
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java
@@ -0,0 +1,162 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.listener;
+
+import com.cloud.agent.AgentManager;
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.ModifyStoragePoolCommand;
+import com.cloud.host.HostVO;
+import com.cloud.host.dao.HostDao;
+import com.cloud.storage.DataStoreRole;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.StoragePoolHostVO;
+import com.cloud.storage.dao.StoragePoolHostDao;
+import com.cloud.utils.exception.CloudRuntimeException;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import javax.inject.Inject;
+
+/**
+ * OntapHostListener handles host lifecycle events for ONTAP storage pools.
+ *
+ * For ONTAP iSCSI storage pools:
+ * - The igroup (initiator group) is created/updated in OntapPrimaryDatastoreLifecycle.attachCluster()
+ * - The actual iSCSI target discovery and login is handled by StorageManager via ModifyStoragePoolCommand
+ * - This listener simply manages the storage pool-host relationship in the database
+ *
+ * For ONTAP NFS storage pools:
+ * - The export policy is configured during storage pool creation
+ * - The actual NFS mount is handled by StorageManager via ModifyStoragePoolCommand
+ * - This listener simply manages the storage pool-host relationship in the database
+ */
+public class OntapHostListener implements HypervisorHostListener {
+ protected Logger logger = LogManager.getLogger(getClass());
+
+ @Inject private HostDao hostDao;
+ @Inject private AgentManager agentMgr;
+ @Inject private PrimaryDataStoreDao storagePoolDao;
+ @Inject private DataStoreManager dataStoreMgr;
+ @Inject private StoragePoolDetailsDao storagePoolDetailsDao;
+ @Inject private StoragePoolHostDao storagePoolHostDao;
+
+ @Override
+ public boolean hostAdded(long hostId) {
+ HostVO host = hostDao.findById(hostId);
+
+ if (host == null) {
+ logger.error("hostAdded: Host {} not found", hostId);
+ return false;
+ }
+
+ if (host.getClusterId() == null) {
+ logger.error("hostAdded: Host {} has no associated cluster", hostId);
+ return false;
+ }
+
+ logger.info("hostAdded: Host {} added to cluster {}", hostId, host.getClusterId());
+ return true;
+ }
+
+ @Override
+ public boolean hostConnect(long hostId, long storagePoolId) {
+ logger.debug("hostConnect: Connecting host {} to storage pool {}", hostId, storagePoolId);
+
+ HostVO host = hostDao.findById(hostId);
+ if (host == null) {
+ logger.error("hostConnect: Host {} not found", hostId);
+ return false;
+ }
+
+ // Create or update the storage pool host mapping in the database
+ // The actual storage pool connection (iSCSI login or NFS mount) is handled
+ // by the StorageManager via ModifyStoragePoolCommand sent to the host agent
+ StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(storagePoolId, hostId);
+ StoragePool storagePool = (StoragePool)dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);
+ if (storagePoolHost == null) {
+ storagePoolHost = new StoragePoolHostVO(storagePoolId, hostId, "");
+ storagePoolHostDao.persist(storagePoolHost);
+ ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, storagePool);
+ Answer answer = agentMgr.easySend(host.getId(), cmd);
+ if (answer == null || !answer.getResult()) {
+ storagePoolDao.expunge(storagePool.getId());
+ throw new CloudRuntimeException("hostConnect: Failed to attach storage pool to host: " + host.getId() +
+ " due to " + (answer != null ? answer.getDetails() : "no answer from agent"));
+ }
+ logger.info("Connection established between storage pool {} and host {}", storagePool, host);
+ } else {
+ // TODO: Update any necessary details if needed, by fetching OntapVolume info from ONTAP
+ logger.debug("hostConnect: Storage pool-host mapping already exists for pool {} and host {}",
+ storagePool.getName(), host.getName());
+ }
+
+ return true;
+ }
+
+ @Override
+ public boolean hostDisconnected(long hostId, long storagePoolId) {
+ logger.debug("hostDisconnected: Disconnecting host {} from storage pool {}",
+ hostId, storagePoolId);
+
+ StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(storagePoolId, hostId);
+ if (storagePoolHost != null) {
+ storagePoolHostDao.deleteStoragePoolHostDetails(hostId, storagePoolId);
+ logger.info("hostDisconnected: Removed storage pool-host mapping for pool {} and host {}",
+ storagePoolId, hostId);
+ } else {
+ logger.debug("hostDisconnected: No storage pool-host mapping found for pool {} and host {}",
+ storagePoolId, hostId);
+ }
+
+ return true;
+ }
+
+ @Override
+ public boolean hostAboutToBeRemoved(long hostId) {
+ HostVO host = hostDao.findById(hostId);
+ if (host == null) {
+ logger.error("hostAboutToBeRemoved: Host {} not found", hostId);
+ return false;
+ }
+
+ logger.info("hostAboutToBeRemoved: Host {} about to be removed from cluster {}",
+ hostId, host.getClusterId());
+
+ // Note: When a host is removed, the igroup initiator should be removed in
+ // the appropriate lifecycle method, not here
+ return true;
+ }
+
+ @Override
+ public boolean hostRemoved(long hostId, long clusterId) {
+ logger.info("hostRemoved: Host {} removed from cluster {}", hostId, clusterId);
+ return true;
+ }
+
+ @Override
+ public boolean hostEnabled(long hostId) {
+ logger.debug("hostEnabled: Host {} enabled", hostId);
+ return true;
+ }
+}
+
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java
index fa2f14692c77..8f75ff05660a 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java
@@ -27,6 +27,7 @@
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
import org.apache.cloudstack.storage.driver.OntapPrimaryDatastoreDriver;
import org.apache.cloudstack.storage.lifecycle.OntapPrimaryDatastoreLifecycle;
+import org.apache.cloudstack.storage.listener.OntapHostListener;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.springframework.stereotype.Component;
@@ -41,6 +42,7 @@ public class OntapPrimaryDatastoreProvider implements PrimaryDataStoreProvider {
private static final Logger s_logger = LogManager.getLogger(OntapPrimaryDatastoreProvider.class);
private OntapPrimaryDatastoreDriver primaryDatastoreDriver;
private OntapPrimaryDatastoreLifecycle primaryDatastoreLifecycle;
+ private OntapHostListener hostListener;
public OntapPrimaryDatastoreProvider() {
s_logger.info("OntapPrimaryDatastoreProvider initialized");
@@ -57,7 +59,7 @@ public DataStoreDriver getDataStoreDriver() {
@Override
public HypervisorHostListener getHostListener() {
- return null;
+ return hostListener;
}
@Override
@@ -71,6 +73,7 @@ public boolean configure(Map params) {
s_logger.trace("OntapPrimaryDatastoreProvider: configure: Called");
primaryDatastoreDriver = ComponentContext.inject(OntapPrimaryDatastoreDriver.class);
primaryDatastoreLifecycle = ComponentContext.inject(OntapPrimaryDatastoreLifecycle.class);
+ hostListener = ComponentContext.inject(OntapHostListener.class);
return true;
}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java
index 0f9706335784..b142bac44a45 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java
@@ -22,10 +22,15 @@
import com.cloud.utils.exception.CloudRuntimeException;
import feign.FeignException;
import org.apache.cloudstack.storage.feign.FeignClientFactory;
+import org.apache.cloudstack.storage.feign.client.AggregateFeignClient;
import org.apache.cloudstack.storage.feign.client.JobFeignClient;
+import org.apache.cloudstack.storage.feign.client.NetworkFeignClient;
+import org.apache.cloudstack.storage.feign.client.SANFeignClient;
import org.apache.cloudstack.storage.feign.client.SvmFeignClient;
import org.apache.cloudstack.storage.feign.client.VolumeFeignClient;
import org.apache.cloudstack.storage.feign.model.Aggregate;
+import org.apache.cloudstack.storage.feign.model.IpInterface;
+import org.apache.cloudstack.storage.feign.model.IscsiService;
import org.apache.cloudstack.storage.feign.model.Job;
import org.apache.cloudstack.storage.feign.model.OntapStorage;
import org.apache.cloudstack.storage.feign.model.Svm;
@@ -34,11 +39,13 @@
import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
import org.apache.cloudstack.storage.service.model.AccessGroup;
import org.apache.cloudstack.storage.service.model.CloudStackVolume;
+import org.apache.cloudstack.storage.service.model.ProtocolType;
import org.apache.cloudstack.storage.utils.Constants;
import org.apache.cloudstack.storage.utils.Utility;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
@@ -53,9 +60,12 @@
public abstract class StorageStrategy {
// Replace @Inject Feign clients with FeignClientFactory
private final FeignClientFactory feignClientFactory;
+ private final AggregateFeignClient aggregateFeignClient;
private final VolumeFeignClient volumeFeignClient;
private final SvmFeignClient svmFeignClient;
private final JobFeignClient jobFeignClient;
+ private final NetworkFeignClient networkFeignClient;
+ private final SANFeignClient sanFeignClient;
protected OntapStorage storage;
@@ -72,9 +82,12 @@ public StorageStrategy(OntapStorage ontapStorage) {
s_logger.info("Initializing StorageStrategy with base URL: " + baseURL);
// Initialize FeignClientFactory and create clients
this.feignClientFactory = new FeignClientFactory();
+ this.aggregateFeignClient = feignClientFactory.createClient(AggregateFeignClient.class, baseURL);
this.volumeFeignClient = feignClientFactory.createClient(VolumeFeignClient.class, baseURL);
this.svmFeignClient = feignClientFactory.createClient(SvmFeignClient.class, baseURL);
this.jobFeignClient = feignClientFactory.createClient(JobFeignClient.class, baseURL);
+ this.networkFeignClient = feignClientFactory.createClient(NetworkFeignClient.class, baseURL);
+ this.sanFeignClient = feignClientFactory.createClient(SANFeignClient.class, baseURL);
}
// Connect method to validate ONTAP cluster, credentials, protocol, and SVM
@@ -110,12 +123,31 @@ public boolean connect() {
s_logger.error("iSCSI protocol is not enabled on SVM " + svmName);
throw new CloudRuntimeException("iSCSI protocol is not enabled on SVM " + svmName);
}
+ // TODO: Implement logic to select appropriate aggregate based on storage requirements
List aggrs = svm.getAggregates();
if (aggrs == null || aggrs.isEmpty()) {
s_logger.error("No aggregates are assigned to SVM " + svmName);
throw new CloudRuntimeException("No aggregates are assigned to SVM " + svmName);
}
- this.aggregates = aggrs;
+ // Set the aggregates which are according to the storage requirements
+ for (Aggregate aggr : aggrs) {
+ s_logger.debug("Found aggregate: " + aggr.getName() + " with UUID: " + aggr.getUuid());
+ Aggregate aggrResp = aggregateFeignClient.getAggregateByUUID(authHeader, aggr.getUuid());
+ if (!Objects.equals(aggrResp.getState(), Aggregate.StateEnum.ONLINE)) {
+ s_logger.warn("Aggregate " + aggr.getName() + " is not in online state. Skipping this aggregate.");
+ continue;
+ } else if (aggrResp.getSpace() == null || aggrResp.getAvailableBlockStorageSpace() == null ||
+ aggrResp.getAvailableBlockStorageSpace() <= storage.getSize().doubleValue()) {
+ s_logger.warn("Aggregate " + aggr.getName() + " does not have sufficient available space. Skipping this aggregate.");
+ continue;
+ }
+ s_logger.info("Selected aggregate: " + aggr.getName() + " for volume operations.");
+ this.aggregates = List.of(aggr);
+ }
+ if (this.aggregates == null || this.aggregates.isEmpty()) {
+ s_logger.error("No suitable aggregates found on SVM " + svmName + " for volume creation.");
+ throw new CloudRuntimeException("No suitable aggregates found on SVM " + svmName + " for volume creation.");
+ }
s_logger.info("Successfully connected to ONTAP cluster and validated ONTAP details provided");
} catch (Exception e) {
throw new CloudRuntimeException("Failed to connect to ONTAP cluster: " + e.getMessage(), e);
@@ -131,7 +163,7 @@ public boolean connect() {
* throw exception in case of disaggregated ONTAP storage
*
* @param volumeName the name of the volume to create
- * @param size the size of the volume in bytes
+ * @param size the size of the volume in bytes
* @return the created Volume object
*/
public Volume createStorageVolume(String volumeName, Long size) {
@@ -152,7 +184,10 @@ public Volume createStorageVolume(String volumeName, Long size) {
volumeRequest.setName(volumeName);
volumeRequest.setSvm(svm);
- volumeRequest.setAggregates(aggregates);
+ Aggregate aggr = new Aggregate();
+ aggr.setName(aggregates.get(0).getName());
+ aggr.setUuid(aggregates.get(0).getUuid());
+ volumeRequest.setAggregates(List.of(aggr));
volumeRequest.setSize(size);
// Make the POST API call to create the volume
try {
@@ -165,35 +200,34 @@ public Volume createStorageVolume(String volumeName, Long size) {
String jobUUID = jobResponse.getJob().getUuid();
//Create URI for GET Job API
- int jobRetryCount = 0;
- Job createVolumeJob = null;
- while(createVolumeJob == null || !createVolumeJob.getState().equals(Constants.JOB_SUCCESS)) {
- if(jobRetryCount >= Constants.JOB_MAX_RETRIES) {
- s_logger.error("Job to create volume " + volumeName + " did not complete within expected time.");
- throw new CloudRuntimeException("Job to create volume " + volumeName + " did not complete within expected time.");
- }
-
- try {
- createVolumeJob = jobFeignClient.getJobByUUID(authHeader, jobUUID);
- if (createVolumeJob == null) {
- s_logger.warn("Job with UUID " + jobUUID + " not found. Retrying...");
- } else if (createVolumeJob.getState().equals(Constants.JOB_FAILURE)) {
- throw new CloudRuntimeException("Job to create volume " + volumeName + " failed with error: " + createVolumeJob.getMessage());
- }
- } catch (FeignException.FeignClientException e) {
- throw new CloudRuntimeException("Failed to fetch job status: " + e.getMessage());
- }
-
- jobRetryCount++;
- Thread.sleep(Constants.CREATE_VOLUME_CHECK_SLEEP_TIME); // Sleep for 2 seconds before polling again
+ Boolean jobSucceeded = jobPollForSuccess(jobUUID);
+ if (!jobSucceeded) {
+ s_logger.error("Volume creation job failed for volume: " + volumeName);
+ throw new CloudRuntimeException("Volume creation job failed for volume: " + volumeName);
}
+ s_logger.info("Volume creation job completed successfully for volume: " + volumeName);
} catch (Exception e) {
s_logger.error("Exception while creating volume: ", e);
throw new CloudRuntimeException("Failed to create volume: " + e.getMessage());
}
+ // Verify if the Volume has been created and set the Volume object
+ // Call the VolumeFeignClient to get the created volume details
+ OntapResponse volumesResponse = volumeFeignClient.getAllVolumes(authHeader, Map.of(Constants.NAME, volumeName));
+ if (volumesResponse == null || volumesResponse.getRecords() == null || volumesResponse.getRecords().isEmpty()) {
+ s_logger.error("Volume " + volumeName + " not found after creation.");
+ throw new CloudRuntimeException("Volume " + volumeName + " not found after creation.");
+ }
+ Volume createdVolume = volumesResponse.getRecords().get(0);
+ if (createdVolume == null) {
+ s_logger.error("Failed to retrieve details of the created volume " + volumeName);
+ throw new CloudRuntimeException("Failed to retrieve details of the created volume " + volumeName);
+ } else if (createdVolume.getName() == null || !createdVolume.getName().equals(volumeName)) {
+ s_logger.error("Mismatch in created volume name. Expected: " + volumeName + ", Found: " + createdVolume.getName());
+ throw new CloudRuntimeException("Mismatch in created volume name. Expected: " + volumeName + ", Found: " + createdVolume.getName());
+ }
s_logger.info("Volume created successfully: " + volumeName);
- //TODO
- return null;
+ // Return the created Volume object
+ return createdVolume;
}
/**
@@ -204,8 +238,7 @@ public Volume createStorageVolume(String volumeName, Long size) {
* @param volume the volume to update
* @return the updated Volume object
*/
- public Volume updateStorageVolume(Volume volume)
- {
+ public Volume updateStorageVolume(Volume volume) {
//TODO
return null;
}
@@ -217,9 +250,24 @@ public Volume updateStorageVolume(Volume volume)
*
* @param volume the volume to delete
*/
- public void deleteStorageVolume(Volume volume)
- {
- //TODO
+ public void deleteStorageVolume(Volume volume) {
+ s_logger.info("Deleting ONTAP volume by name: " + volume.getName() + " and uuid: " + volume.getUuid());
+ // Calling the VolumeFeignClient to delete the volume
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ try {
+ // TODO: Implement lun and file deletion, if any, before deleting the volume
+ JobResponse jobResponse = volumeFeignClient.deleteVolume(authHeader, volume.getUuid());
+ Boolean jobSucceeded = jobPollForSuccess(jobResponse.getJob().getUuid());
+ if (!jobSucceeded) {
+ s_logger.error("Volume deletion job failed for volume: " + volume.getName());
+ throw new CloudRuntimeException("Volume deletion job failed for volume: " + volume.getName());
+ }
+ s_logger.info("Volume deleted successfully: " + volume.getName());
+ } catch (FeignException.FeignClientException e) {
+ s_logger.error("Exception while deleting volume: ", e);
+ throw new CloudRuntimeException("Failed to delete volume: " + e.getMessage());
+ }
+ s_logger.info("ONTAP volume deletion process completed for volume: " + volume.getName());
}
/**
@@ -230,18 +278,113 @@ public void deleteStorageVolume(Volume volume)
* @param volume the volume to retrieve
* @return the retrieved Volume object
*/
- public Volume getStorageVolume(Volume volume)
- {
+ public Volume getStorageVolume(Volume volume) {
//TODO
return null;
}
+ /**
+ * Get the storage path based on protocol.
+ * For iSCSI: Returns the iSCSI target IQN (e.g., iqn.1992-08.com.netapp:sn.xxx:vs.3)
+ * For NFS: Returns the mount path (to be implemented)
+ *
+ * @return the storage path as a String
+ */
+ public String getStoragePath() {
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ String targetIqn = null;
+ try {
+ if (storage.getProtocol() == ProtocolType.ISCSI) {
+ // For iSCSI, fetch the target IQN from the iSCSI service
+ s_logger.info("Fetching iSCSI target IQN for SVM: {}", storage.getSvmName());
+
+ Map queryParams = new HashMap<>();
+ queryParams.put(Constants.SVM_DOT_NAME, storage.getSvmName());
+ queryParams.put("fields", "enabled,target");
+ queryParams.put("max_records", "1");
+
+ OntapResponse response = sanFeignClient.getIscsiServices(authHeader, queryParams);
+
+ if (response == null || response.getRecords() == null || response.getRecords().isEmpty()) {
+ throw new CloudRuntimeException("No iSCSI service found for SVM: " + storage.getSvmName());
+ }
+
+ IscsiService iscsiService = response.getRecords().get(0);
+
+ if (iscsiService.getTarget() == null || iscsiService.getTarget().getName() == null) {
+ throw new CloudRuntimeException("iSCSI target IQN not found for SVM: " + storage.getSvmName());
+ }
+
+ targetIqn = iscsiService.getTarget().getName();
+ s_logger.info("Retrieved iSCSI target IQN: {}", targetIqn);
+ return targetIqn;
+
+ } else if (storage.getProtocol() == ProtocolType.NFS3) {
+ // TODO: Implement NFS path retrieval logic
+ } else {
+ throw new CloudRuntimeException("Unsupported protocol for path retrieval: " + storage.getProtocol());
+ }
+
+ } catch (FeignException.FeignClientException e) {
+ s_logger.error("Exception while retrieving storage path for protocol {}: {}", storage.getProtocol(), e.getMessage(), e);
+ throw new CloudRuntimeException("Failed to retrieve storage path: " + e.getMessage());
+ }
+ return targetIqn;
+ }
+
+
+
+ /**
+     * Get the IP address of a data network interface for the configured SVM and protocol
+ *
+ * @return the network interface ip as a String
+ */
+
+ public String getNetworkInterface() {
+ // Feign call to get network interfaces
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ try {
+ Map queryParams = new HashMap<>();
+ queryParams.put(Constants.SVM_DOT_NAME, storage.getSvmName());
+ if (storage.getProtocol() != null) {
+ switch (storage.getProtocol()) {
+ case NFS3:
+ queryParams.put(Constants.SERVICES, Constants.DATA_NFS);
+ break;
+ case ISCSI:
+ queryParams.put(Constants.SERVICES, Constants.DATA_ISCSI);
+ break;
+ default:
+ s_logger.error("Unsupported protocol: " + storage.getProtocol());
+ throw new CloudRuntimeException("Unsupported protocol: " + storage.getProtocol());
+ }
+ }
+ queryParams.put(Constants.FIELDS, Constants.IP_ADDRESS);
+ queryParams.put(Constants.RETURN_RECORDS, Constants.TRUE);
+ OntapResponse response =
+ networkFeignClient.getNetworkIpInterfaces(authHeader, queryParams);
+ if (response != null && response.getRecords() != null && !response.getRecords().isEmpty()) {
+                // For simplicity, return the first interface's IP address
+ IpInterface ipInterface = response.getRecords().get(0);
+ s_logger.info("Retrieved network interface: " + ipInterface.getIp().getAddress());
+ return ipInterface.getIp().getAddress();
+ } else {
+ throw new CloudRuntimeException("No network interfaces found for SVM " + storage.getSvmName() +
+ " for protocol " + storage.getProtocol());
+ }
+ } catch (FeignException.FeignClientException e) {
+ s_logger.error("Exception while retrieving network interfaces: ", e);
+ throw new CloudRuntimeException("Failed to retrieve network interfaces: " + e.getMessage());
+ }
+ }
+
/**
* Method encapsulates the behavior based on the opted protocol in subclasses.
* it is going to mimic
- * createLun for iSCSI, FC protocols
- * createFile for NFS3.0 and NFS4.1 protocols
- * createNameSpace for Nvme/TCP and Nvme/FC protocol
+ * createLun for iSCSI, FC protocols
+ * createFile for NFS3.0 and NFS4.1 protocols
+ * createNameSpace for Nvme/TCP and Nvme/FC protocol
+ *
* @param cloudstackVolume the CloudStack volume to create
* @return the created CloudStackVolume object
*/
@@ -250,9 +393,10 @@ public Volume getStorageVolume(Volume volume)
/**
* Method encapsulates the behavior based on the opted protocol in subclasses.
* it is going to mimic
- * updateLun for iSCSI, FC protocols
- * updateFile for NFS3.0 and NFS4.1 protocols
- * updateNameSpace for Nvme/TCP and Nvme/FC protocol
+ * updateLun for iSCSI, FC protocols
+ * updateFile for NFS3.0 and NFS4.1 protocols
+ * updateNameSpace for Nvme/TCP and Nvme/FC protocol
+ *
* @param cloudstackVolume the CloudStack volume to update
* @return the updated CloudStackVolume object
*/
@@ -261,9 +405,10 @@ public Volume getStorageVolume(Volume volume)
/**
* Method encapsulates the behavior based on the opted protocol in subclasses.
* it is going to mimic
- * deleteLun for iSCSI, FC protocols
- * deleteFile for NFS3.0 and NFS4.1 protocols
- * deleteNameSpace for Nvme/TCP and Nvme/FC protocol
+ * deleteLun for iSCSI, FC protocols
+ * deleteFile for NFS3.0 and NFS4.1 protocols
+ * deleteNameSpace for Nvme/TCP and Nvme/FC protocol
+ *
* @param cloudstackVolume the CloudStack volume to delete
*/
abstract void deleteCloudStackVolume(CloudStackVolume cloudstackVolume);
@@ -271,9 +416,10 @@ public Volume getStorageVolume(Volume volume)
/**
* Method encapsulates the behavior based on the opted protocol in subclasses.
* it is going to mimic
- * getLun for iSCSI, FC protocols
- * getFile for NFS3.0 and NFS4.1 protocols
- * getNameSpace for Nvme/TCP and Nvme/FC protocol
+ * getLun for iSCSI, FC protocols
+ * getFile for NFS3.0 and NFS4.1 protocols
+ * getNameSpace for Nvme/TCP and Nvme/FC protocol
+ *
* @param cloudstackVolume the CloudStack volume to retrieve
* @return the retrieved CloudStackVolume object
*/
@@ -281,28 +427,31 @@ public Volume getStorageVolume(Volume volume)
/**
* Method encapsulates the behavior based on the opted protocol in subclasses
- * createiGroup for iSCSI and FC protocols
- * createExportPolicy for NFS 3.0 and NFS 4.1 protocols
- * createSubsystem for Nvme/TCP and Nvme/FC protocols
+ * createiGroup for iSCSI and FC protocols
+ * createExportPolicy for NFS 3.0 and NFS 4.1 protocols
+ * createSubsystem for Nvme/TCP and Nvme/FC protocols
+ *
* @param accessGroup the access group to create
* @return the created AccessGroup object
*/
- abstract AccessGroup createAccessGroup(AccessGroup accessGroup);
+ abstract public AccessGroup createAccessGroup(AccessGroup accessGroup);
/**
* Method encapsulates the behavior based on the opted protocol in subclasses
- * deleteiGroup for iSCSI and FC protocols
- * deleteExportPolicy for NFS 3.0 and NFS 4.1 protocols
- * deleteSubsystem for Nvme/TCP and Nvme/FC protocols
+ * deleteiGroup for iSCSI and FC protocols
+ * deleteExportPolicy for NFS 3.0 and NFS 4.1 protocols
+ * deleteSubsystem for Nvme/TCP and Nvme/FC protocols
+ *
* @param accessGroup the access group to delete
*/
- abstract void deleteAccessGroup(AccessGroup accessGroup);
+ abstract public void deleteAccessGroup(AccessGroup accessGroup);
/**
* Method encapsulates the behavior based on the opted protocol in subclasses
- * updateiGroup example add/remove-Iqn for iSCSI and FC protocols
- * updateExportPolicy example add/remove-Rule for NFS 3.0 and NFS 4.1 protocols
- * //TODO for Nvme/TCP and Nvme/FC protocols
+ * updateiGroup example add/remove-Iqn for iSCSI and FC protocols
+ * updateExportPolicy example add/remove-Rule for NFS 3.0 and NFS 4.1 protocols
+ * //TODO for Nvme/TCP and Nvme/FC protocols
+ *
* @param accessGroup the access group to update
* @return the updated AccessGroup object
*/
@@ -310,9 +459,10 @@ public Volume getStorageVolume(Volume volume)
/**
* Method encapsulates the behavior based on the opted protocol in subclasses
- * getiGroup for iSCSI and FC protocols
- * getExportPolicy for NFS 3.0 and NFS 4.1 protocols
- * getNameSpace for Nvme/TCP and Nvme/FC protocols
+ * getiGroup for iSCSI and FC protocols
+ * getExportPolicy for NFS 3.0 and NFS 4.1 protocols
+ * getNameSpace for Nvme/TCP and Nvme/FC protocols
+ *
* @param accessGroup the access group to retrieve
* @return the retrieved AccessGroup object
*/
@@ -320,17 +470,56 @@ public Volume getStorageVolume(Volume volume)
/**
* Method encapsulates the behavior based on the opted protocol in subclasses
- * lunMap for iSCSI and FC protocols
- * //TODO for Nvme/TCP and Nvme/FC protocols
+ * lunMap for iSCSI and FC protocols
+ * //TODO for Nvme/TCP and Nvme/FC protocols
+ *
* @param values
*/
- abstract void enableLogicalAccess(Map values);
+ abstract void enableLogicalAccess(Map values);
/**
* Method encapsulates the behavior based on the opted protocol in subclasses
- * lunUnmap for iSCSI and FC protocols
- * //TODO for Nvme/TCP and Nvme/FC protocols
+ * lunUnmap for iSCSI and FC protocols
+ * //TODO for Nvme/TCP and Nvme/FC protocols
+ *
* @param values
*/
- abstract void disableLogicalAccess(Map values);
+ abstract void disableLogicalAccess(Map values);
+
+ private Boolean jobPollForSuccess(String jobUUID) {
+        // Poll the job status via the Job Feign client until success, failure, or retry limit
+ int jobRetryCount = 0;
+ Job jobResp = null;
+ try {
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ while (jobResp == null || !jobResp.getState().equals(Constants.JOB_SUCCESS)) {
+ if (jobRetryCount >= Constants.JOB_MAX_RETRIES) {
+ s_logger.error("Job did not complete within expected time.");
+ throw new CloudRuntimeException("Job did not complete within expected time.");
+ }
+
+ try {
+ jobResp = jobFeignClient.getJobByUUID(authHeader, jobUUID);
+ if (jobResp == null) {
+ s_logger.warn("Job with UUID " + jobUUID + " not found. Retrying...");
+ } else if (jobResp.getState().equals(Constants.JOB_FAILURE)) {
+ throw new CloudRuntimeException("Job failed with error: " + jobResp.getMessage());
+ }
+ } catch (FeignException.FeignClientException e) {
+ throw new CloudRuntimeException("Failed to fetch job status: " + e.getMessage());
+ }
+
+ jobRetryCount++;
+ Thread.sleep(Constants.CREATE_VOLUME_CHECK_SLEEP_TIME); // Sleep for 2 seconds before polling again
+ }
+ if (jobResp == null || !jobResp.getState().equals(Constants.JOB_SUCCESS)) {
+ return false;
+ }
+ } catch (FeignException.FeignClientException e) {
+ throw new CloudRuntimeException("Failed to fetch job status: " + e.getMessage());
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ return true;
+ }
}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java
index 0b47e1ff70a0..7b5372c69bdd 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java
@@ -19,19 +19,28 @@
package org.apache.cloudstack.storage.service;
+import com.cloud.host.HostVO;
+import com.cloud.hypervisor.Hypervisor;
import com.cloud.utils.exception.CloudRuntimeException;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
import org.apache.cloudstack.storage.feign.FeignClientFactory;
import org.apache.cloudstack.storage.feign.client.SANFeignClient;
+import org.apache.cloudstack.storage.feign.model.Igroup;
+import org.apache.cloudstack.storage.feign.model.Initiator;
import org.apache.cloudstack.storage.feign.model.Lun;
import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.feign.model.Svm;
import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
import org.apache.cloudstack.storage.service.model.AccessGroup;
import org.apache.cloudstack.storage.service.model.CloudStackVolume;
+import org.apache.cloudstack.storage.service.model.ProtocolType;
import org.apache.cloudstack.storage.utils.Constants;
import org.apache.cloudstack.storage.utils.Utility;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
+import java.util.ArrayList;
+import java.util.List;
import java.util.Map;
public class UnifiedSANStrategy extends SANStrategy {
@@ -102,13 +111,192 @@ CloudStackVolume getCloudStackVolume(CloudStackVolume cloudstackVolume) {
@Override
public AccessGroup createAccessGroup(AccessGroup accessGroup) {
- //TODO
- return null;
+ s_logger.info("createAccessGroup : Create Igroup");
+ String igroupName = "unknown";
+ if (accessGroup == null) {
+ throw new CloudRuntimeException("createAccessGroup : Failed to create Igroup, invalid accessGroup object passed");
+ }
+ try {
+ // Get StoragePool details
+ if (accessGroup.getPrimaryDataStoreInfo() == null || accessGroup.getPrimaryDataStoreInfo().getDetails() == null
+ || accessGroup.getPrimaryDataStoreInfo().getDetails().isEmpty()) {
+ throw new CloudRuntimeException("createAccessGroup : Failed to create Igroup, invalid datastore details in the request");
+ }
+ Map dataStoreDetails = accessGroup.getPrimaryDataStoreInfo().getDetails();
+ s_logger.debug("createAccessGroup: Successfully fetched datastore details.");
+
+ // Get AuthHeader
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+
+ // Generate Igroup request
+ Igroup igroupRequest = new Igroup();
+ List hostsIdentifier = new ArrayList<>();
+ String svmName = dataStoreDetails.get(Constants.SVM_NAME);
+ igroupName = Utility.getIgroupName(svmName, accessGroup.getScope().getScopeType(), accessGroup.getScope().getScopeId());
+ Hypervisor.HypervisorType hypervisorType = accessGroup.getPrimaryDataStoreInfo().getHypervisor();
+
+ ProtocolType protocol = ProtocolType.valueOf(dataStoreDetails.get(Constants.PROTOCOL));
+ // Check if all hosts support the protocol
+ if (accessGroup.getHostsToConnect() == null || accessGroup.getHostsToConnect().isEmpty()) {
+ throw new CloudRuntimeException("createAccessGroup : Failed to create Igroup, no hosts to connect provided in the request");
+ }
+ if (!validateProtocolSupportAndFetchHostsIdentifier(accessGroup.getHostsToConnect(), protocol, hostsIdentifier)) {
+ String errMsg = "createAccessGroup: Not all hosts in the " + accessGroup.getScope().getScopeType().toString() + " support the protocol: " + protocol.name();
+ throw new CloudRuntimeException(errMsg);
+ }
+
+ if (svmName != null && !svmName.isEmpty()) {
+ Svm svm = new Svm();
+ svm.setName(svmName);
+ igroupRequest.setSvm(svm);
+ }
+
+ if (igroupName != null && !igroupName.isEmpty()) {
+ igroupRequest.setName(igroupName);
+ }
+
+// if (hypervisorType != null) {
+// String hypervisorName = hypervisorType.name();
+// igroupRequest.setOsType(Igroup.OsTypeEnum.valueOf(Utility.getOSTypeFromHypervisor(hypervisorName)));
+// } else if ( accessGroup.getScope().getScopeType() == ScopeType.ZONE) {
+// igroupRequest.setOsType(Igroup.OsTypeEnum.linux); // TODO: Defaulting to LINUX for zone scope for now, this has to be revisited when we support other hypervisors
+// }
+ igroupRequest.setOsType(Igroup.OsTypeEnum.linux);
+
+ if (hostsIdentifier != null && hostsIdentifier.size() > 0) {
+ List initiators = new ArrayList<>();
+ for (String hostIdentifier : hostsIdentifier) {
+ Initiator initiator = new Initiator();
+ initiator.setName(hostIdentifier);
+ initiators.add(initiator);
+ }
+ igroupRequest.setInitiators(initiators);
+ }
+ igroupRequest.setProtocol(Igroup.ProtocolEnum.valueOf("iscsi"));
+ // Create Igroup
+ s_logger.debug("createAccessGroup: About to call sanFeignClient.createIgroup with igroupName: {}", igroupName);
+ AccessGroup createdAccessGroup = new AccessGroup();
+ OntapResponse createdIgroup = null;
+ try {
+ createdIgroup = sanFeignClient.createIgroup(authHeader, true, igroupRequest);
+ } catch (Exception feignEx) {
+ String errMsg = feignEx.getMessage();
+ if (errMsg != null && errMsg.contains(("5374023"))) {
+ s_logger.warn("createAccessGroup: Igroup with name {} already exists. Fetching existing Igroup.", igroupName);
+ // TODO: Currently we aren't doing anything with the returned AccessGroup object, so, haven't added code here to fetch the existing Igroup and set it in AccessGroup.
+ return createdAccessGroup;
+ }
+ s_logger.error("createAccessGroup: Exception during Feign call: {}", feignEx.getMessage(), feignEx);
+ throw feignEx;
+ }
+
+ if (createdIgroup == null || createdIgroup.getRecords() == null || createdIgroup.getRecords().isEmpty()) {
+ s_logger.error("createAccessGroup: Igroup creation failed for Igroup Name {}", igroupName);
+ throw new CloudRuntimeException("Failed to create Igroup: " + igroupName);
+ }
+ Igroup igroup = createdIgroup.getRecords().get(0);
+ s_logger.debug("createAccessGroup: Successfully extracted igroup from response: {}", igroup);
+ s_logger.info("createAccessGroup: Igroup created successfully. IgroupName: {}", igroup.getName());
+
+ createdAccessGroup.setIgroup(igroup);
+ s_logger.debug("createAccessGroup: Returning createdAccessGroup");
+ return createdAccessGroup;
+ } catch (Exception e) {
+ s_logger.error("Exception occurred while creating Igroup: {}, Exception: {}", igroupName, e.getMessage(), e);
+ throw new CloudRuntimeException("Failed to create Igroup: " + e.getMessage(), e);
+ }
}
@Override
public void deleteAccessGroup(AccessGroup accessGroup) {
- //TODO
+ s_logger.info("deleteAccessGroup: Deleting iGroup");
+
+ if (accessGroup == null) {
+ throw new CloudRuntimeException("deleteAccessGroup: Invalid accessGroup object - accessGroup is null");
+ }
+
+ // Get PrimaryDataStoreInfo from accessGroup
+ PrimaryDataStoreInfo primaryDataStoreInfo = accessGroup.getPrimaryDataStoreInfo();
+ if (primaryDataStoreInfo == null) {
+ throw new CloudRuntimeException("deleteAccessGroup: PrimaryDataStoreInfo is null in accessGroup");
+ }
+
+ try {
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+
+ // Extract SVM name from storage (already initialized in constructor via OntapStorage)
+ String svmName = storage.getSvmName();
+
+ // Determine scope and generate iGroup name
+ String igroupName;
+ if (primaryDataStoreInfo.getClusterId() != null) {
+ igroupName = Utility.getIgroupName(svmName, com.cloud.storage.ScopeType.CLUSTER, primaryDataStoreInfo.getClusterId());
+ s_logger.info("deleteAccessGroup: Deleting cluster-scoped iGroup '{}'", igroupName);
+ } else {
+ igroupName = Utility.getIgroupName(svmName, com.cloud.storage.ScopeType.ZONE, primaryDataStoreInfo.getDataCenterId());
+ s_logger.info("deleteAccessGroup: Deleting zone-scoped iGroup '{}'", igroupName);
+ }
+
+ // Get the iGroup to retrieve its UUID
+ Map igroupParams = Map.of(
+ Constants.SVM_DOT_NAME, svmName,
+ Constants.NAME, igroupName
+ );
+
+ try {
+ OntapResponse igroupResponse = sanFeignClient.getIgroupResponse(authHeader, igroupParams);
+ if (igroupResponse == null || igroupResponse.getRecords() == null || igroupResponse.getRecords().isEmpty()) {
+ s_logger.warn("deleteAccessGroup: iGroup '{}' not found, may have been already deleted", igroupName);
+ return;
+ }
+
+ Igroup igroup = igroupResponse.getRecords().get(0);
+ String igroupUuid = igroup.getUuid();
+
+ if (igroupUuid == null || igroupUuid.isEmpty()) {
+ throw new CloudRuntimeException("deleteAccessGroup: iGroup UUID is null or empty for iGroup: " + igroupName);
+ }
+
+ s_logger.info("deleteAccessGroup: Deleting iGroup '{}' with UUID '{}'", igroupName, igroupUuid);
+
+ // Delete the iGroup using the UUID
+ sanFeignClient.deleteIgroup(authHeader, igroupUuid);
+
+ s_logger.info("deleteAccessGroup: Successfully deleted iGroup '{}'", igroupName);
+
+ } catch (Exception e) {
+ String errorMsg = e.getMessage();
+ // Check if iGroup doesn't exist (ONTAP error code: 5374852 - "The initiator group does not exist.")
+ if (errorMsg != null && (errorMsg.contains("5374852") || errorMsg.contains("not found"))) {
+ s_logger.warn("deleteAccessGroup: iGroup '{}' does not exist, skipping deletion", igroupName);
+ } else {
+ throw e;
+ }
+ }
+
+ } catch (Exception e) {
+ s_logger.error("deleteAccessGroup: Failed to delete iGroup. Exception: {}", e.getMessage(), e);
+ throw new CloudRuntimeException("Failed to delete iGroup: " + e.getMessage(), e);
+ }
+ }
+
+ private boolean validateProtocolSupportAndFetchHostsIdentifier(List hosts, ProtocolType protocolType, List hostIdentifiers) {
+ switch (protocolType) {
+ case ISCSI:
+ String protocolPrefix = Constants.IQN;
+ for (HostVO host : hosts) {
+ if (host == null || host.getStorageUrl() == null || host.getStorageUrl().trim().isEmpty()
+ || !host.getStorageUrl().startsWith(protocolPrefix)) {
+ return false;
+ }
+ hostIdentifiers.add(host.getStorageUrl());
+ }
+ break;
+ default:
+ throw new CloudRuntimeException("validateProtocolSupportAndFetchHostsIdentifier : Unsupported protocol: " + protocolType.name());
+ }
+ s_logger.info("validateProtocolSupportAndFetchHostsIdentifier: All hosts support the protocol: " + protocolType.name());
+ return true;
}
@Override
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java
index c4dfce7ce51c..ef6fe7353291 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java
@@ -19,14 +19,23 @@
package org.apache.cloudstack.storage.service.model;
+import com.cloud.host.HostVO;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
import org.apache.cloudstack.storage.feign.model.ExportPolicy;
import org.apache.cloudstack.storage.feign.model.Igroup;
+import java.util.List;
+
public class AccessGroup {
private Igroup igroup;
private ExportPolicy exportPolicy;
+ private List hostsToConnect;
+ private PrimaryDataStoreInfo primaryDataStoreInfo;
+ private Scope scope;
+
public Igroup getIgroup() {
return igroup;
}
@@ -42,4 +51,23 @@ public ExportPolicy getPolicy() {
public void setPolicy(ExportPolicy policy) {
this.exportPolicy = policy;
}
+
+ public List getHostsToConnect() {
+ return hostsToConnect;
+ }
+ public void setHostsToConnect(List hostsToConnect) {
+ this.hostsToConnect = hostsToConnect;
+ }
+ public PrimaryDataStoreInfo getPrimaryDataStoreInfo() {
+ return primaryDataStoreInfo;
+ }
+ public void setPrimaryDataStoreInfo(PrimaryDataStoreInfo primaryDataStoreInfo) {
+ this.primaryDataStoreInfo = primaryDataStoreInfo;
+ }
+ public Scope getScope() {
+ return scope;
+ }
+ public void setScope(Scope scope) {
+ this.scope = scope;
+ }
}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java
index b58e8484cd48..0744777c12e5 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java
@@ -19,6 +19,7 @@
package org.apache.cloudstack.storage.utils;
+
public class Constants {
public static final String NFS = "nfs";
@@ -28,7 +29,10 @@ public class Constants {
public static final String SVM_NAME = "svmName";
public static final String USERNAME = "username";
public static final String PASSWORD = "password";
+ public static final String DATA_LIF = "dataLIF";
public static final String MANAGEMENT_LIF = "managementLIF";
+ public static final String VOLUME_NAME = "volumeName";
+ public static final String VOLUME_UUID = "volumeUUID";
public static final String IS_DISAGGREGATED = "isDisaggregated";
public static final String RUNNING = "running";
@@ -40,24 +44,41 @@ public class Constants {
public static final String JOB_FAILURE = "failure";
public static final String JOB_SUCCESS = "success";
+ public static final String TRUE = "true";
+ public static final String FALSE = "false";
+
// Query params
public static final String NAME = "name";
public static final String FIELDS = "fields";
public static final String AGGREGATES = "aggregates";
public static final String STATE = "state";
+ public static final String DATA_NFS = "data_nfs";
+ public static final String DATA_ISCSI = "data_iscsi";
+ public static final String IP_ADDRESS = "ip.address";
+ public static final String SERVICES = "services";
+ public static final String RETURN_RECORDS = "return_records";
public static final int JOB_MAX_RETRIES = 100;
public static final int CREATE_VOLUME_CHECK_SLEEP_TIME = 2000;
- public static final String PATH_SEPARATOR = "/";
+ public static final String SLASH = "/";
public static final String EQUALS = "=";
public static final String SEMICOLON = ";";
public static final String COMMA = ",";
+ public static final String HYPHEN = "-";
public static final String VOLUME_PATH_PREFIX = "/vol/";
+ public static final String ONTAP_NAME_REGEX = "^[a-zA-Z][a-zA-Z0-9_]*$";
public static final String KVM = "KVM";
public static final String HTTPS = "https://";
-
+ public static final String SVM_DOT_NAME = "svm.name";
+ public static final String LUN_DOT_NAME = "lun.name";
+ public static final String IQN = "iqn";
+ public static final String LUN_DOT_UUID = "lun.uuid";
+ public static final String IGROUP_DOT_NAME = "igroup.name";
+ public static final String IGROUP_DOT_UUID = "igroup.uuid";
+ public static final String UNDERSCORE = "_";
+ public static final String CS = "cs";
}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java
index af48724f984c..323adfd0320c 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java
@@ -19,14 +19,13 @@
package org.apache.cloudstack.storage.utils;
+import com.cloud.storage.ScopeType;
import com.cloud.utils.StringUtils;
import com.cloud.utils.exception.CloudRuntimeException;
-import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
-import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.feign.model.Lun;
-import org.apache.cloudstack.storage.feign.model.LunSpace;
-import org.apache.cloudstack.storage.feign.model.Svm;
-import org.apache.cloudstack.storage.service.model.CloudStackVolume;
+import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.provider.StorageProviderFactory;
+import org.apache.cloudstack.storage.service.StorageStrategy;
import org.apache.cloudstack.storage.service.model.ProtocolType;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -53,41 +52,40 @@ public static String generateAuthHeader (String username, String password) {
return BASIC + StringUtils.SPACE + new String(encodedBytes);
}
- public static CloudStackVolume createCloudStackVolumeRequestByProtocol(StoragePoolVO storagePool, Map<String, String> details, DataObject dataObject) {
- CloudStackVolume cloudStackVolumeRequest = null;
-
- String protocol = details.get(Constants.PROTOCOL);
- if (ProtocolType.ISCSI.name().equalsIgnoreCase(protocol)) {
- cloudStackVolumeRequest = new CloudStackVolume();
- Lun lunRequest = new Lun();
- Svm svm = new Svm();
- svm.setName(details.get(Constants.SVM_NAME));
- lunRequest.setSvm(svm);
-
- LunSpace lunSpace = new LunSpace();
- lunSpace.setSize(dataObject.getSize());
- lunRequest.setSpace(lunSpace);
- //Lun name is full path like in unified "/vol/VolumeName/LunName"
- String lunFullName = Constants.VOLUME_PATH_PREFIX + storagePool.getName() + Constants.PATH_SEPARATOR + dataObject.getName();
- lunRequest.setName(lunFullName);
+ public static String getOSTypeFromHypervisor(String hypervisorType){
+ switch (hypervisorType) {
+ case Constants.KVM:
+ return Lun.OsTypeEnum.LINUX.name();
+ default:
+ String errMsg = "getOSTypeFromHypervisor : Unsupported hypervisor type " + hypervisorType + " for ONTAP storage";
+ s_logger.error(errMsg);
+ throw new CloudRuntimeException(errMsg);
+ }
+ }
- String hypervisorType = storagePool.getHypervisor().name();
- String osType = null;
- switch (hypervisorType) {
- case Constants.KVM:
- osType = Lun.OsTypeEnum.LINUX.getValue();
- break;
- default:
- String errMsg = "createCloudStackVolume : Unsupported hypervisor type " + hypervisorType + " for ONTAP storage";
- s_logger.error(errMsg);
- throw new CloudRuntimeException(errMsg);
- }
- lunRequest.setOsType(Lun.OsTypeEnum.valueOf(osType));
+ public static StorageStrategy getStrategyByStoragePoolDetails(Map<String, String> details) {
+ if (details == null || details.isEmpty()) {
+ s_logger.error("getStrategyByStoragePoolDetails: Storage pool details are null or empty");
+ throw new CloudRuntimeException("getStrategyByStoragePoolDetails: Storage pool details are null or empty");
+ }
+ String protocol = details.get(Constants.PROTOCOL);
+ OntapStorage ontapStorage = new OntapStorage(details.get(Constants.USERNAME), details.get(Constants.PASSWORD),
+ details.get(Constants.MANAGEMENT_LIF), details.get(Constants.SVM_NAME), Long.parseLong(details.get(Constants.SIZE)),
+ ProtocolType.valueOf(protocol),
+ Boolean.parseBoolean(details.get(Constants.IS_DISAGGREGATED)));
+ StorageStrategy storageStrategy = StorageProviderFactory.getStrategy(ontapStorage);
+ boolean isValid = storageStrategy.connect();
+ if (isValid) {
+ s_logger.info("Connection to Ontap SVM [{}] successful", details.get(Constants.SVM_NAME));
+ return storageStrategy;
+ } else {
+ s_logger.error("getStrategyByStoragePoolDetails: Connection to Ontap SVM [" + details.get(Constants.SVM_NAME) + "] failed");
+ throw new CloudRuntimeException("getStrategyByStoragePoolDetails: Connection to Ontap SVM [" + details.get(Constants.SVM_NAME) + "] failed");
+ }
+ }
- cloudStackVolumeRequest.setLun(lunRequest);
- return cloudStackVolumeRequest;
- } else {
- throw new CloudRuntimeException("createCloudStackVolumeRequestByProtocol: Unsupported protocol " + protocol);
- }
+ public static String getIgroupName(String svmName, ScopeType scopeType, Long scopeId) {
+ // Igroup name format: cs_svmName_scopeType_scopeId
+ return Constants.CS + Constants.UNDERSCORE + svmName + Constants.UNDERSCORE + scopeType.toString().toLowerCase() + Constants.UNDERSCORE + scopeId;
}
}