

AnalyticsSynapseArtifactsModelFactory.SparkBatchJob Method

Definition

Initializes a new instance of SparkBatchJob.

public static Azure.Analytics.Synapse.Artifacts.Models.SparkBatchJob SparkBatchJob (Azure.Analytics.Synapse.Artifacts.Models.SparkBatchJobState livyInfo = default, string name = default, string workspaceName = default, string sparkPoolName = default, string submitterName = default, string submitterId = default, string artifactId = default, Azure.Analytics.Synapse.Artifacts.Models.SparkJobType? jobType = default, Azure.Analytics.Synapse.Artifacts.Models.SparkBatchJobResultType? result = default, Azure.Analytics.Synapse.Artifacts.Models.SparkScheduler scheduler = default, Azure.Analytics.Synapse.Artifacts.Models.SparkServicePlugin plugin = default, System.Collections.Generic.IEnumerable<Azure.Analytics.Synapse.Artifacts.Models.SparkServiceError> errors = default, System.Collections.Generic.IReadOnlyDictionary<string,string> tags = default, int id = 0, string appId = default, System.Collections.Generic.IReadOnlyDictionary<string,string> appInfo = default, Azure.Analytics.Synapse.Artifacts.Models.LivyStates? state = default, System.Collections.Generic.IEnumerable<string> logLines = default);
static member SparkBatchJob : Azure.Analytics.Synapse.Artifacts.Models.SparkBatchJobState * string * string * string * string * string * string * Nullable<Azure.Analytics.Synapse.Artifacts.Models.SparkJobType> * Nullable<Azure.Analytics.Synapse.Artifacts.Models.SparkBatchJobResultType> * Azure.Analytics.Synapse.Artifacts.Models.SparkScheduler * Azure.Analytics.Synapse.Artifacts.Models.SparkServicePlugin * seq<Azure.Analytics.Synapse.Artifacts.Models.SparkServiceError> * System.Collections.Generic.IReadOnlyDictionary<string, string> * int * string * System.Collections.Generic.IReadOnlyDictionary<string, string> * Nullable<Azure.Analytics.Synapse.Artifacts.Models.LivyStates> * seq<string> -> Azure.Analytics.Synapse.Artifacts.Models.SparkBatchJob
Public Shared Function SparkBatchJob (Optional livyInfo As SparkBatchJobState = Nothing, Optional name As String = Nothing, Optional workspaceName As String = Nothing, Optional sparkPoolName As String = Nothing, Optional submitterName As String = Nothing, Optional submitterId As String = Nothing, Optional artifactId As String = Nothing, Optional jobType As Nullable(Of SparkJobType) = Nothing, Optional result As Nullable(Of SparkBatchJobResultType) = Nothing, Optional scheduler As SparkScheduler = Nothing, Optional plugin As SparkServicePlugin = Nothing, Optional errors As IEnumerable(Of SparkServiceError) = Nothing, Optional tags As IReadOnlyDictionary(Of String, String) = Nothing, Optional id As Integer = 0, Optional appId As String = Nothing, Optional appInfo As IReadOnlyDictionary(Of String, String) = Nothing, Optional state As Nullable(Of LivyStates) = Nothing, Optional logLines As IEnumerable(Of String) = Nothing) As SparkBatchJob

Parameters

name
String

The batch name.

workspaceName
String

The workspace name.

sparkPoolName
String

The Spark pool name.

submitterName
String

The submitter name.

submitterId
String

The submitter identifier.

artifactId
String

The artifact identifier.

jobType
Nullable<SparkJobType>

The job type.

result
Nullable<SparkBatchJobResultType>

The Spark batch job result.

scheduler
SparkScheduler

The scheduler information.

plugin
SparkServicePlugin

The plugin information.

errors
IEnumerable<SparkServiceError>

The error information.

id
Int32

The session ID.

appId
String

The application ID of this session.

appInfo
IReadOnlyDictionary<String,String>

The detailed application information.

state
Nullable<LivyStates>

The batch state.

logLines
IEnumerable<String>

The log lines.

Returns

A new SparkBatchJob instance for mocking.
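
Examples

A minimal sketch of using the factory to build a SparkBatchJob for a unit test, without calling the service. It assumes the factory lives in the Azure.Analytics.Synapse.Artifacts.Models namespace alongside the returned model type; the argument values are illustrative, and LivyStates.Success is an assumed enumeration value.

using System;
using Azure.Analytics.Synapse.Artifacts.Models;

// Construct a populated SparkBatchJob to return from a mocked client call.
// Only the properties needed by the test are set; the rest keep their defaults.
SparkBatchJob job = AnalyticsSynapseArtifactsModelFactory.SparkBatchJob(
    name: "nightly-etl",                           // illustrative batch name
    workspaceName: "contoso-workspace",            // illustrative workspace
    sparkPoolName: "contoso-pool",                 // illustrative Spark pool
    submitterName: "test-user",
    id: 42,                                        // session ID
    appId: "application_1234567890_0001",
    state: LivyStates.Success,                     // assumed enum value
    logLines: new[] { "stdout: job completed" });

Console.WriteLine($"{job.Name} finished in state {job.State}");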

Applies to: