@@ -157,60 +157,232 @@ deployment_name = '{{ deployment_name }}';
 Use the following StackQL query and manifest file to create a new <code>clusters</code> resource.

 <Tabs
-  defaultValue="create"
+  defaultValue="all"
   values={[
-    { label: 'clusters', value: 'create', },
-    { label: 'Manifest', value: 'manifest', },
+    { label: 'All Properties', value: 'all', },
+    { label: 'Manifest', value: 'manifest', },
   ]}
 >
-<TabItem value="create">
+<TabItem value="all">

 ```sql
 /*+ create */
 INSERT INTO databricks_workspace.compute.clusters (
 deployment_name,
-data__cluster_name,
-data__is_single_node,
 data__kind,
+data__cluster_name,
 data__spark_version,
+data__use_ml_runtime,
+data__is_single_node,
 data__node_type_id,
-data__aws_attributes
+data__autoscale,
+data__num_workers,
+data__data_security_mode,
+data__runtime_engine,
+data__enable_elastic_disk,
+data__aws_attributes,
+data__cluster_log_conf,
+data__init_scripts,
+data__spark_conf,
+data__spark_env_vars,
+data__driver_node_type_id,
+data__ssh_public_keys,
+data__custom_tags,
+data__single_user_name,
+data__autotermination_minutes,
+data__enable_local_disk_encryption,
+data__policy_id,
+data__apply_policy_default_values,
+data__instance_pool_id,
+data__driver_instance_pool_id,
+data__docker_image,
+data__workload_type,
+data__clone_from
 )
 SELECT
 '{{ deployment_name }}',
-'{{ cluster_name }}',
-'{{ is_single_node }}',
 '{{ kind }}',
+'{{ cluster_name }}',
 '{{ spark_version }}',
+{{ use_ml_runtime }},
+{{ is_single_node }},
 '{{ node_type_id }}',
-'{{ aws_attributes }}'
+'{{ autoscale }}',
+{{ num_workers }},
+'{{ data_security_mode }}',
+'{{ runtime_engine }}',
+{{ enable_elastic_disk }},
+'{{ aws_attributes }}',
+'{{ cluster_log_conf }}',
+'{{ init_scripts }}',
+'{{ spark_conf }}',
+'{{ spark_env_vars }}',
+'{{ driver_node_type_id }}',
+'{{ ssh_public_keys }}',
+'{{ custom_tags }}',
+'{{ single_user_name }}',
+{{ autotermination_minutes }},
+{{ enable_local_disk_encryption }},
+'{{ policy_id }}',
+{{ apply_policy_default_values }},
+'{{ instance_pool_id }}',
+'{{ driver_instance_pool_id }}',
+'{{ docker_image }}',
+'{{ workload_type }}',
+'{{ clone_from }}'
 ;
 ```

 </TabItem>
 <TabItem value="manifest">

 ```yaml
-- name: your_resource_model_name
+- name: databricks_cluster_resource
   props:
+    - name: kind
+      description: The kind of compute described by this compute specification (required)
+      value: "CLASSIC_PREVIEW"
     - name: cluster_name
+      description: Cluster name requested by the user; it does not need to be unique. If not specified at creation, the cluster name will be an empty string.
       value: single-node-with-kind-cluster
-    - name: is_single_node
-      value: true
-    - name: kind
-      value: CLASSIC_PREVIEW
     - name: spark_version
+      description: The Spark version of the cluster, e.g. 3.3.x-scala2.11 (required)
       value: 14.3.x-scala2.12
+    - name: use_ml_runtime
+      description: Controls ML runtime usage; depends on the kind field and whether node_type_id is a GPU node
+      value: false
+    - name: is_single_node
+      description: When true, configures single-node-related tags, conf, and workers
+      value: true
     - name: node_type_id
+      description: The node type for provisioning cluster resources (e.g., memory- or compute-optimized)
       value: i3.xlarge
+    - name: autoscale
+      description: Parameters for automatic cluster scaling based on load
+      value:
+        min_workers: 2 # Minimum number of workers the cluster can scale down to (integer)
+        max_workers: 8 # Maximum number of workers the cluster can scale up to (integer)
+    - name: num_workers
+      description: Number of worker nodes in the cluster; total nodes is num_workers + 1 (including the driver)
+      value: 0
+    - name: data_security_mode
+      description: Determines the data governance model for accessing data from the cluster
+      value: "SINGLE_USER" # Options listed below:
+      # DATA_SECURITY_MODE_AUTO: Databricks chooses based on compute config
+      # DATA_SECURITY_MODE_STANDARD: Alias for USER_ISOLATION
+      # DATA_SECURITY_MODE_DEDICATED: Alias for SINGLE_USER
+      # NONE: No security isolation, data governance unavailable
+      # SINGLE_USER: Secure single-user cluster with full features
+      # USER_ISOLATION: Secure multi-user cluster with some feature limits
+      # Legacy modes (deprecated in DBR 15.0+):
+      # LEGACY_TABLE_ACL: For legacy Table ACL migration
+      # LEGACY_PASSTHROUGH: For legacy high-concurrency clusters
+      # LEGACY_SINGLE_USER: For legacy standard clusters
+      # LEGACY_SINGLE_USER_STANDARD: No UC/passthrough mode
+    - name: runtime_engine
+      description: Determines the cluster's runtime engine
+      value: "STANDARD" # Options - STANDARD, PHOTON
+    - name: enable_elastic_disk
+      description: Enables autoscaling of local storage for Spark
+      value: true
     - name: aws_attributes
+      description: AWS-specific attributes for cluster configuration
       value:
-        first_on_demand: 1
-        availability: SPOT_WITH_FALLBACK
-        zone_id: auto
-        spot_bid_price_percent: 100
-        ebs_volume_count: 0
-
+        first_on_demand: 1 # Number of on-demand instances before spot (integer)
+        availability: SPOT_WITH_FALLBACK # Instance availability type - SPOT, ON_DEMAND, SPOT_WITH_FALLBACK
+        zone_id: auto # AWS availability zone for the cluster (e.g., us-west-2a)
+        spot_bid_price_percent: 100 # Spot instance bid price as % of on-demand price (integer, max 10000)
+        ebs_volume_count: 0 # Number of EBS volumes per instance (integer, 0-10)
+        ebs_volume_size: 100 # Size of each EBS volume in GB (integer)
+        ebs_volume_type: GENERAL_PURPOSE_SSD # EBS volume type (string)
+        instance_profile_arn: "" # Optional IAM instance profile ARN for cluster nodes
+        ebs_volume_iops: null # Optional IOPS for gp3 volumes
+        ebs_volume_throughput: null # Optional throughput for gp3 volumes
+    - name: cluster_log_conf
+      description: Configuration for Spark log delivery to storage
+      value:
+        dbfs: # Only one destination type allowed
+          destination: "dbfs:/cluster-logs" # DBFS path for logs
+        # Alternative S3 configuration:
+        # s3:
+        #   destination: "s3://my-bucket/logs"
+        #   region: "us-west-2" # or endpoint URL
+        #   enable_encryption: false
+        #   encryption_type: "sse-s3" # or sse-kms
+        #   kms_key: "" # KMS key for sse-kms
+        #   canned_acl: "bucket-owner-full-control"
+    - name: init_scripts
+      description: Scripts to run during cluster startup
+      value:
+        - workspace: # Workspace file location
+            destination: "/Users/user@example.com/init.sh"
+        # Alternative locations:
+        # - dbfs:
+        #     destination: "dbfs:/path/init.sh"
+        # - s3:
+        #     destination: "s3://bucket/init.sh"
+        #     region: "us-west-2"
+        # - volumes:
+        #     destination: "/Volumes/init.sh"
+    - name: spark_conf
+      description: Spark configuration key-value pairs, including JVM options
+      value:
+        "spark.speculation": true
+        "spark.streaming.ui.retainedBatches": 5
+    - name: spark_env_vars
+      description: Environment variables set on all cluster nodes
+      value:
+        "SPARK_WORKER_MEMORY": "28000m"
+        "SPARK_LOCAL_DIRS": "/local_disk0"
+    - name: driver_node_type_id
+      description: Optional separate node type for the Spark driver (defaults to node_type_id)
+      value: i3.xlarge
+    - name: ssh_public_keys
+      description: SSH public keys added to each Spark node (max 10)
+      value: []
+    - name: custom_tags
+      description: Additional tags for cluster resources (max 45 tags)
+      value:
+        Provisioner: stackql
+        StackName: "{{ stack_name }}"
+        StackEnv: "{{ stack_env }}"
+    - name: single_user_name
+      description: The username when data_security_mode is SINGLE_USER
+      value: "user@example.com"
+    - name: autotermination_minutes
+      description: Minutes of inactivity before cluster termination (10-10000; 0 to disable)
+      value: 120
+    - name: enable_local_disk_encryption
+      description: Enable LUKS encryption on cluster VM local disks
+      value: true
+    - name: policy_id
+      description: ID of the cluster policy to apply
+      value: ""
+    - name: apply_policy_default_values
+      description: Use policy defaults (true) or only fixed values (false) for omitted fields
+      value: false
+    - name: instance_pool_id
+      description: ID of the instance pool for cluster nodes
+      value: ""
+    - name: driver_instance_pool_id
+      description: Optional separate instance pool ID for the driver node
+      value: ""
+    - name: docker_image
+      description: Custom Docker image configuration
+      value:
+        url: "repo/image:tag" # Docker image URL
+        basic_auth:
+          username: "" # Registry authentication
+          password: ""
+    - name: workload_type
+      description: Defines what type of clients can use the cluster
+      value:
+        clients:
+          notebooks: true # Allow notebook workloads
+          jobs: true # Allow job workloads
+    - name: clone_from
+      description: When specified, clones libraries from the source cluster during creation of the new cluster
+      value:
+        source_cluster_id: "1202-211320-brick1" # The cluster being cloned (required)
 ```

 </TabItem>
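
As a follow-up sanity check (a minimal sketch, not part of the change above): assuming the `clusters` resource exposes `cluster_id`, `cluster_name`, and `state` as selectable fields, a query along these lines can confirm the cluster was created in the target deployment. The `WHERE` clause mirrors the existing select pattern visible in the hunk header.

```sql
-- Hypothetical verification query; the selected field names are
-- assumptions, not confirmed by this diff.
SELECT cluster_id, cluster_name, state
FROM databricks_workspace.compute.clusters
WHERE deployment_name = '{{ deployment_name }}';
```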