Spark's JDBC data source is not a general-purpose database library: you can't run DDL or execute stored procedures through it. It is, however, built on top of the plain JDBC driver, which you can use directly from either Scala or Java. For example:
%scala
import java.sql.DriverManager
import java.sql.Connection
import java.util.Properties
val jdbcHostname = "yourServerName.database.windows.net"
val jdbcPort = 1433
val jdbcDatabase = "yourDbName"
val jdbcUsername = dbutils.secrets.get(scope = "keyvault", key = "sqluser")
val jdbcPassword = dbutils.secrets.get(scope = "keyvault", key = "sqlpassword")
// Create the JDBC URL without passing in the user and password parameters.
val jdbcUrl = s"jdbc:sqlserver://${jdbcHostname}:${jdbcPort};database=${jdbcDatabase}"
// Create a Properties() object to hold the parameters.
val connectionProperties = new Properties()
connectionProperties.put("user", s"${jdbcUsername}")
connectionProperties.put("password", s"${jdbcPassword}")
// Load the SQL Server JDBC driver class so it registers itself with DriverManager.
val driverClass = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
Class.forName(driverClass)
val con = DriverManager.getConnection(jdbcUrl, connectionProperties)
val stmt = con.createStatement()
// Run whatever DDL you need; the table definition here is just a placeholder.
stmt.execute("create table whatever (id int, name nvarchar(50))")
stmt.close()