diff --git a/.changelog/3982.txt b/.changelog/3982.txt new file mode 100644 index 0000000000..7a513e0b8a --- /dev/null +++ b/.changelog/3982.txt @@ -0,0 +1,11 @@ +```release-note:enhancement +resource/mongodbatlas_search_index: Adds `num_partitions` attribute +``` + +```release-note:enhancement +data-source/mongodbatlas_search_index: Adds `num_partitions` attribute +``` + +```release-note:enhancement +data-source/mongodbatlas_search_indexes: Adds `num_partitions` attribute +``` diff --git a/docs/data-sources/search_index.md b/docs/data-sources/search_index.md index 7b286c1f2e..fa075b033d 100644 --- a/docs/data-sources/search_index.md +++ b/docs/data-sources/search_index.md @@ -46,5 +46,6 @@ data "mongodbatlas_search_index" "test" { * `type_sets` - Set of type set definitions (when present). Each item includes: * `name` - Type set name. * `types` - JSON array string describing the types for the set. +* `num_partitions` - Number of index partitions. Returns 0 if not set in the resource. For more information see: [MongoDB Atlas API Reference.](https://docs.atlas.mongodb.com/atlas-search/) - [and MongoDB Atlas API - Search](https://docs.atlas.mongodb.com/reference/api/atlas-search/) Documentation for more information. diff --git a/docs/data-sources/search_indexes.md b/docs/data-sources/search_indexes.md index 6bac0ec411..88e32e7699 100644 --- a/docs/data-sources/search_indexes.md +++ b/docs/data-sources/search_indexes.md @@ -52,5 +52,6 @@ data "mongodbatlas_search_indexes" "test" { * `type_sets` - Set of type set definitions (when present). Each item includes: * `name` - Type set name. * `types` - JSON array string describing the types for the set. +* `num_partitions` - Number of index partitions. Returns 0 if not set in the resource. For more information see: [MongoDB Atlas API Reference.](https://docs.atlas.mongodb.com/atlas-search/) - [and MongoDB Atlas API - Search](https://docs.atlas.mongodb.com/reference/api/atlas-search/) Documentation for more information. 
diff --git a/docs/resources/search_index.md b/docs/resources/search_index.md index 3dc12b2633..c3f4491666 100644 --- a/docs/resources/search_index.md +++ b/docs/resources/search_index.md @@ -237,6 +237,8 @@ EOF EOF ``` +* `num_partitions` - (Optional) Number of index partitions. Allowed values are [1, 2, 4]. Default value is 1. + ## Attributes Reference In addition to all arguments above, the following attributes are exported: diff --git a/internal/service/searchindex/data_source_search_index.go b/internal/service/searchindex/data_source_search_index.go index 8a2f0c910a..681783a2ef 100644 --- a/internal/service/searchindex/data_source_search_index.go +++ b/internal/service/searchindex/data_source_search_index.go @@ -118,6 +118,10 @@ func returnSearchIndexDSSchema() map[string]*schema.Schema { }, }, }, + "num_partitions": { + Type: schema.TypeInt, + Computed: true, + }, } } @@ -223,6 +227,10 @@ func dataSourceMongoDBAtlasSearchIndexRead(ctx context.Context, d *schema.Resour return diag.Errorf("error setting `stored_source` for search index (%s): %s", d.Id(), err) } + if err := d.Set("num_partitions", searchIndex.LatestDefinition.NumPartitions); err != nil { + return diag.Errorf("error setting `num_partitions` for search index (%s): %s", d.Id(), err) + } + d.SetId(conversion.EncodeStateID(map[string]string{ "project_id": projectID.(string), "cluster_name": clusterName.(string), diff --git a/internal/service/searchindex/data_source_search_indexes.go b/internal/service/searchindex/data_source_search_indexes.go index 39988aa07f..042283cd76 100644 --- a/internal/service/searchindex/data_source_search_indexes.go +++ b/internal/service/searchindex/data_source_search_indexes.go @@ -102,6 +102,7 @@ func flattenSearchIndexes(searchIndexes []admin.SearchIndexResponse, projectID, "status": searchIndexes[i].Status, "synonyms": flattenSearchIndexSynonyms(searchIndexes[i].LatestDefinition.GetSynonyms()), "type": searchIndexes[i].Type, + "num_partitions": 
searchIndexes[i].LatestDefinition.NumPartitions, } if searchIndexes[i].LatestDefinition.Mappings != nil { diff --git a/internal/service/searchindex/resource_search_index.go b/internal/service/searchindex/resource_search_index.go index bf85d81327..0c8f4da5c5 100644 --- a/internal/service/searchindex/resource_search_index.go +++ b/internal/service/searchindex/resource_search_index.go @@ -156,6 +156,10 @@ func returnSearchIndexSchema() map[string]*schema.Schema { }, }, }, + "num_partitions": { + Type: schema.TypeInt, + Optional: true, + }, } } @@ -269,6 +273,7 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. StoredSource: searchRead.LatestDefinition.StoredSource, Synonyms: searchRead.LatestDefinition.Synonyms, Fields: searchRead.LatestDefinition.Fields, + NumPartitions: searchRead.LatestDefinition.NumPartitions, }, } @@ -276,6 +281,10 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. searchIndex.Definition.Analyzer = conversion.StringPtr(d.Get("analyzer").(string)) } + if d.HasChange("num_partitions") { + searchIndex.Definition.NumPartitions = conversion.IntPtr(d.Get("num_partitions").(int)) + } + if d.HasChange("search_analyzer") { searchIndex.Definition.SearchAnalyzer = conversion.StringPtr(d.Get("search_analyzer").(string)) } @@ -485,6 +494,10 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di return diag.Errorf("error setting `stored_source` for search index (%s): %s", d.Id(), err) } + if err := d.Set("num_partitions", searchIndex.LatestDefinition.NumPartitions); err != nil { + return diag.Errorf("error setting `num_partitions` for search index (%s): %s", d.Id(), err) + } + return nil } @@ -501,6 +514,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
Definition: &admin.BaseSearchIndexCreateRequestDefinition{ Analyzer: conversion.StringPtr(d.Get("analyzer").(string)), SearchAnalyzer: conversion.StringPtr(d.Get("search_analyzer").(string)), + NumPartitions: conversion.IntPtr(d.Get("num_partitions").(int)), }, } diff --git a/internal/service/searchindex/resource_search_index_test.go b/internal/service/searchindex/resource_search_index_test.go index c3aed2d45a..d922a6336d 100644 --- a/internal/service/searchindex/resource_search_index_test.go +++ b/internal/service/searchindex/resource_search_index_test.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "regexp" + "strconv" "testing" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -192,6 +193,57 @@ func TestAccSearchIndex_withVector(t *testing.T) { resource.ParallelTest(t, *basicVectorTestCase(t)) } +func TestAccSearchIndex_withNumPartitions(t *testing.T) { + var ( + projectID, clusterName = acc.ClusterNameExecution(t, true) + indexName = acc.RandomName() + ) + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.PreCheckBasic(t) }, + ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + CheckDestroy: acc.CheckDestroySearchIndex, + Steps: []resource.TestStep{ + { + Config: configSearchWithNumPartitions(projectID, indexName, clusterName, nil), + Check: checkSearchWithNumPartitions(projectID, indexName, clusterName, nil), + }, + { + Config: configSearchWithNumPartitions(projectID, indexName, clusterName, conversion.IntPtr(2)), + Check: checkSearchWithNumPartitions(projectID, indexName, clusterName, conversion.IntPtr(2)), + }, + { + Config: configSearchWithNumPartitions(projectID, indexName, clusterName, nil), + Check: checkSearchWithNumPartitions(projectID, indexName, clusterName, nil), + }, + }, + }) +} + +func TestAccVectorSearchIndex_withNumPartitions(t *testing.T) { + var ( + projectID, clusterName = acc.ClusterNameExecution(t, true) + indexName = acc.RandomName() + ) + resource.Test(t, resource.TestCase{ + PreCheck: func() { 
acc.PreCheckBasic(t) }, + ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + CheckDestroy: acc.CheckDestroySearchIndex, + Steps: []resource.TestStep{ + { + Config: configVectorSearchWithNumPartitions(projectID, indexName, clusterName, nil), + Check: checkVectorSearchWithNumPartitions(projectID, indexName, clusterName, nil), + }, + { + Config: configVectorSearchWithNumPartitions(projectID, indexName, clusterName, conversion.IntPtr(2)), + Check: checkVectorSearchWithNumPartitions(projectID, indexName, clusterName, conversion.IntPtr(2)), + }, + { + Config: configVectorSearchWithNumPartitions(projectID, indexName, clusterName, nil), + Check: checkVectorSearchWithNumPartitions(projectID, indexName, clusterName, nil), + }, + }, + }) +} func basicTestCase(tb testing.TB) *resource.TestCase { tb.Helper() var ( @@ -372,7 +424,7 @@ func configBasic(projectID, clusterName, indexName, indexType, storedSource stri data "mongodbatlas_search_index" "data_index" { cluster_name = mongodbatlas_search_index.test.cluster_name project_id = mongodbatlas_search_index.test.project_id - index_id = mongodbatlas_search_index.test.index_id + index_id = mongodbatlas_search_index.test.index_id } `, clusterName, projectID, indexName, database, collection, searchAnalyzer, extra) } @@ -409,7 +461,7 @@ func configWithMapping(projectID, indexName, clusterName string) string { data "mongodbatlas_search_index" "data_index" { cluster_name = mongodbatlas_search_index.test.cluster_name project_id = mongodbatlas_search_index.test.project_id - index_id = mongodbatlas_search_index.test.index_id + index_id = mongodbatlas_search_index.test.index_id } `, clusterName, projectID, indexName, database, collection, searchAnalyzer, analyzersTF, mappingsFieldsTF) } @@ -451,7 +503,7 @@ func configWithSynonyms(projectID, indexName, clusterName string, has bool) stri data "mongodbatlas_search_index" "data_index" { cluster_name = mongodbatlas_search_index.test.cluster_name project_id = 
mongodbatlas_search_index.test.project_id - index_id = mongodbatlas_search_index.test.index_id + index_id = mongodbatlas_search_index.test.index_id } `, clusterName, projectID, indexName, database, collection, searchAnalyzer, synonymsStr) } @@ -489,7 +541,7 @@ func configAdditional(projectID, indexName, clusterName, additional string) stri data "mongodbatlas_search_index" "data_index" { cluster_name = mongodbatlas_search_index.test.cluster_name project_id = mongodbatlas_search_index.test.project_id - index_id = mongodbatlas_search_index.test.index_id + index_id = mongodbatlas_search_index.test.index_id } `, clusterName, projectID, indexName, database, collection, searchAnalyzer, additional) } @@ -533,11 +585,117 @@ func configVector(projectID, indexName, clusterName string) string { data "mongodbatlas_search_index" "data_index" { cluster_name = mongodbatlas_search_index.test.cluster_name project_id = mongodbatlas_search_index.test.project_id - index_id = mongodbatlas_search_index.test.index_id + index_id = mongodbatlas_search_index.test.index_id } `, clusterName, projectID, indexName, database, collection, fieldsJSON) } +func configVectorSearchWithNumPartitions(projectID, indexName, clusterName string, numPartitions *int) string { + var numPartitionsLine string + hasNumPartitions := numPartitions != nil + if hasNumPartitions { + numPartitionsLine = fmt.Sprintf("num_partitions = %d", *numPartitions) + } + return fmt.Sprintf(` + + resource "mongodbatlas_search_deployment" "test" { + cluster_name = %[1]q + project_id = %[2]q + specs = [ + { + instance_size = "S20_HIGHCPU_NVME" + node_count = 2 + } + ] + } + + resource "mongodbatlas_search_index" "test" { + cluster_name = %[1]q + project_id = %[2]q + name = %[3]q + database = %[4]q + collection_name = %[5]q + + type = "vectorSearch" + %[6]s + fields = <<-EOF + %[7]s + EOF + + depends_on = [mongodbatlas_search_deployment.test] + } + + data "mongodbatlas_search_index" "data_index" { + cluster_name = 
mongodbatlas_search_index.test.cluster_name + project_id = mongodbatlas_search_index.test.project_id + index_id = mongodbatlas_search_index.test.index_id + } + `, clusterName, projectID, indexName, database, collection, numPartitionsLine, fieldsJSON) +} +func configSearchWithNumPartitions(projectID, indexName, clusterName string, numPartitions *int) string { + var numPartitionsLine string + hasNumPartitions := numPartitions != nil + if hasNumPartitions { + numPartitionsLine = fmt.Sprintf("num_partitions = %d", *numPartitions) + } + return fmt.Sprintf(` + + resource "mongodbatlas_search_deployment" "test" { + cluster_name = %[1]q + project_id = %[2]q + specs = [ + { + instance_size = "S20_HIGHCPU_NVME" + node_count = 2 + } + ] + } + + resource "mongodbatlas_search_index" "test" { + cluster_name = %[1]q + project_id = %[2]q + name = %[3]q + database = %[4]q + collection_name = %[5]q + analyzer = "lucene.standard" + search_analyzer = "lucene.standard" + mappings_dynamic = true + type = "search" + %[6]s + + depends_on = [mongodbatlas_search_deployment.test] + } + + data "mongodbatlas_search_index" "data_index" { + cluster_name = mongodbatlas_search_index.test.cluster_name + project_id = mongodbatlas_search_index.test.project_id + index_id = mongodbatlas_search_index.test.index_id + } + `, clusterName, projectID, indexName, database, collection, numPartitionsLine) +} +func checkVectorSearchWithNumPartitions(projectID, indexName, clusterName string, numPartitions *int) resource.TestCheckFunc { + indexType := "vectorSearch" + mappingsDynamic := "true" + checks := []resource.TestCheckFunc{ + resource.TestCheckResourceAttrWith(resourceName, "fields", acc.JSONEquals(fieldsJSON)), + resource.TestCheckResourceAttrWith(datasourceName, "fields", acc.JSONEquals(fieldsJSON)), + } + + if numPartitions != nil { + checks = append(checks, + resource.TestCheckResourceAttr(resourceName, "num_partitions", strconv.Itoa(*numPartitions)), + resource.TestCheckResourceAttr(datasourceName, 
"num_partitions", strconv.Itoa(*numPartitions)), + ) + } else { + checks = append(checks, + resource.TestCheckResourceAttr(resourceName, "num_partitions", "0"), + resource.TestCheckResourceAttr(datasourceName, "num_partitions", "0"), + ) + } + + return checkAggr(projectID, clusterName, indexName, indexType, mappingsDynamic, checks...) +} + func checkVector(projectID, indexName, clusterName string) resource.TestCheckFunc { indexType := "vectorSearch" mappingsDynamic := "true" @@ -546,6 +704,30 @@ func checkVector(projectID, indexName, clusterName string) resource.TestCheckFun resource.TestCheckResourceAttrWith(datasourceName, "fields", acc.JSONEquals(fieldsJSON))) } +func checkSearchWithNumPartitions(projectID, indexName, clusterName string, numPartitions *int) resource.TestCheckFunc { + indexType := "search" + mappingsDynamic := "true" + checks := []resource.TestCheckFunc{ + resource.TestCheckResourceAttr(resourceName, "analyzer", "lucene.standard"), + resource.TestCheckResourceAttr(resourceName, "search_analyzer", "lucene.standard"), + resource.TestCheckResourceAttr(datasourceName, "analyzer", "lucene.standard"), + resource.TestCheckResourceAttr(datasourceName, "search_analyzer", "lucene.standard"), + } + + if numPartitions != nil { + checks = append(checks, + resource.TestCheckResourceAttr(resourceName, "num_partitions", strconv.Itoa(*numPartitions)), + resource.TestCheckResourceAttr(datasourceName, "num_partitions", strconv.Itoa(*numPartitions)), + ) + } else { + checks = append(checks, + resource.TestCheckResourceAttr(resourceName, "num_partitions", "0"), + resource.TestCheckResourceAttr(datasourceName, "num_partitions", "0"), + ) + } + return checkAggr(projectID, clusterName, indexName, indexType, mappingsDynamic, checks...) +} + func importStateIDFunc(resourceName string) resource.ImportStateIdFunc { return func(s *terraform.State) (string, error) { rs, ok := s.RootModule().Resources[resourceName]