Hello Gomathi,
Thank you for the prompt reply. I followed your advice, but I am still hitting the same error. Interestingly, when I copied the JSON out of the error message and ran it manually through Postman, the request succeeded. The error message from the data flow run is below; note that I have scrambled the JSON values, as they contain live data.
Job aborted due to stage failure: Task 0 in stage 31.0 failed 1 times, most recent failure: Lost task 0.0 in stage 31.0 (TID 28) (vm-2da25753 executor 1): com.microsoft.dataflow.Issues: DF-REST_001 - Error response from server: Some(), Status code: 415. Please check your request url and body. (url:https://ebwt-test.fa.ap1.oraclecloud.com//fscmRestApi/resources/11.13.18.05/erpintegrations/,request body: Some({"OperationName":"importBulkData","DocumentContent":"xxxxxxxx","ContentType":"csv","FileName":"xxx_20231206.csv","DocumentAccount":"xx$/yy$/gg$","JobName":"/ff/apps/ggg/xx/xx/xx/common/,fgggh","ParameterList":"Operations,xx,yy,ALL,N,N,N","JobOptions":"InterfaceDetails=15,EnableEvent=Y,ImportOption=Y ,PurgeOption = N,ExtractFileType=ALL"}), request method: POST)
at com.microsoft.dataflow.Utils$.failure(Utils.scala:76)
at org.apache.spark.sql.execution.datasources.rest.RestClient.ensureSuccessResponse(RestClient.scala:595)
at org.apache.spark.sql.execution.datasources.rest.RestClient.executeRequest(RestClient.scala:580)
at org.apache.spark.sql.execution.datasources.rest.RestClient.$anonfun$readPage$2(RestClient.scala:443)
at scala.Option.map(Option.scala:230)
at org.apache.spark.sql.execution.datasources.rest.RestClient.readPage(RestClient.scala:443)
at org.apache.spark.sql.execution.datasources.rest.RestClient.callExecuteSingleRowRequest(RestClient.scala:283)
at org.apache.spark.sql.execution.datasources.rest.RestClient.callResources(RestClient.scala:138)
at com.microsoft.dataflow.store.rest.RestCallee.call(RestStore.scala:329)
at com.microsoft.dataflow.spark.CallExec.$anonfun$doExecute$7(CallExec.scala:117)
at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
at scala.collection.TraversableOnce$FlattenOps$$anon$2.hasNext(TraversableOnce.scala:469)
at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:762)
at org.apache.spark.sql.execution.columnar.DefaultCachedBatchSerializer$$anon$1.hasNext(InMemoryRelation.scala:118)
at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
at org.apache.spark.storage.memory.MemoryStore.putIterator(MemoryStore.scala:237)
at org.apache.spark.storage.memory.MemoryStore.putIteratorAsBytes(MemoryStore.scala:365)
at org.apache.spark.storage.BlockManager.$anonfun$doPutIterator$1(BlockManager.scala:1441)
at org.apache.spark.storage.BlockManager.org$apache$spark$storage$BlockManager$$doPut(BlockManager.scala:1351)
at org.apache.spark.storage.BlockManager.doPutIterator(BlockManager.scala:1415)
at org.apache.spark.storage.BlockManager.getOrElseUpdate(BlockManager.scala:1238)
at org.apache.spark.rdd.RDD.getOrCompute(RDD.scala:385)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:336)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:57)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:374)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:338)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:57)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:374)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:338)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:57)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:374)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:338)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:57)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:374)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:338)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
at org.apache.spark.scheduler.Task.run(Task.scala:131)
at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:498)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1439)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:501)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:750)
Driver stacktrace:
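For reference, this is roughly the request that succeeds when I run it outside the data flow. It is only a minimal sketch: the hostname and all payload values are the scrambled placeholders from the log above, and the basic-auth credentials are placeholders as well. Since the server returns 415 (Unsupported Media Type), the difference may be the Content-Type header, which Postman sets to application/json automatically for a JSON body.

import requests

# Endpoint from the error log. Note: the data flow's URL in the log contains
# a double slash ("//fscmRestApi"); a single slash is used here.
url = ("https://ebwt-test.fa.ap1.oraclecloud.com"
       "/fscmRestApi/resources/11.13.18.05/erpintegrations")

# Payload copied verbatim from the error log (values are scrambled).
payload = {
    "OperationName": "importBulkData",
    "DocumentContent": "xxxxxxxx",  # base64-encoded file content
    "ContentType": "csv",
    "FileName": "xxx_20231206.csv",
    "DocumentAccount": "xx$/yy$/gg$",
    "JobName": "/ff/apps/ggg/xx/xx/xx/common/,fgggh",
    "ParameterList": "Operations,xx,yy,ALL,N,N,N",
    "JobOptions": "InterfaceDetails=15,EnableEvent=Y,ImportOption=Y ,"
                  "PurgeOption = N,ExtractFileType=ALL",
}

# json= serializes the body and sets Content-Type: application/json,
# which is what Postman does implicitly for a raw JSON body.
resp = requests.post(
    url,
    json=payload,
    auth=("<user>", "<password>"),  # placeholder basic auth
    timeout=60,
)
print(resp.status_code, resp.text)

Could you check whether the data flow's REST sink/call is sending the same Content-Type header, since that is the usual cause of a 415 when the identical body works in Postman?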