Gatling reporting higher response time for the first request group in a scenario

1.3k Views Asked by At

I am using id "com.github.lkishalmi.gatling" version "3.2.9" to run my gatling performance tests

below is my simulation code

  // Runs once, at class-construction time — not per virtual user / request.
  print("TIME " + System.currentTimeMillis())

  val httpConf = http.baseUrl("http://abc.io")
  // NOTE(review): removed unused `httpConf2`; its base URL also began with
  // whitespace ("  http://abc.io"), which would have been an invalid URL.

  val scenario_name = "only1_1in10"

  // Builds one multipart upload request of the 370kb PNG.
  // BUG FIX: the original built the URL as
  //   "/pulse/swift/upload?startTime=" + System.currentTimeMillis()
  // — plain string concatenation that is evaluated ONCE while the scenario is
  // constructed, so every request carried the same stale timestamp. Supplying
  // the value as a session function makes Gatling re-evaluate it per request.
  private def upload(requestName: String) =
    http(requestName)
      .post("/pulse/swift/upload")
      .queryParam("startTime", session => System.currentTimeMillis())
      .bodyPart(RawFileBodyPart("file", "src/gatling/simulations/370kb.png"))
      .asMultipartForm

  // Three sequential uploads per virtual user, identical apart from the name.
  val scn = scenario(scenario_name)
    .exec(upload("370kb_" + scenario_name))
    .exec(upload("370kb_next_" + scenario_name))
    .exec(upload("370kb_next_next_" + scenario_name))

  setUp(
    scn.inject(
      // 1 new user per second for 10 seconds => 10 users total.
      constantUsersPerSec(1) during (10)
    )
  ).protocols(httpConf)
    .assertions(forAll.failedRequests.percent.is(0))

I am just uploading images to my server. The server in turn pushes these images to a Kafka queue and responds with a 200.

The issue I am having is that all the requests in the first HTTP group are always slow, while the requests in the other HTTP groups are much faster. I am aware that the first request will take a long time because the server needs some time to warm up. However, I am confused as to why all 10 requests in the first group are slow.

below is the response time distribution for same image for above code

enter image description here

enter image description here

enter image description here

Can someone explain why the response time keeps improving? What is the difference between the first group of requests and the subsequent groups of requests?

My server is a simple Spring Boot server that takes a multipart request and pushes it to a Kafka queue.


Code after separating into different scenarios:

import io.gatling.http.Predef._
import io.gatling.core.Predef._

/**
 * Uploads the same 370kb PNG from three independent scenarios injected in
 * parallel, so each scenario gets its own pool of virtual users (and hence
 * its own fresh connections), rather than three sequential requests inside
 * one scenario.
 */
class OneSimulation extends Simulation {

  // Runs once, at class-construction time — not per request.
  print("TIME " + System.currentTimeMillis())

  val httpConf = http.baseUrl("http://abc.io")
  // NOTE(review): removed unused `httpConf2`; its base URL also began with
  // whitespace ("  http://abc.io"), which would have been an invalid URL.

  // Builds the multipart upload request used by every scenario.
  // BUG FIX: the original appended System.currentTimeMillis() to the URL via
  // string concatenation, which is evaluated only ONCE while the scenario is
  // being built — every request then carried the same stale startTime.
  // Passing a session function makes Gatling evaluate it per request.
  private def upload(scenarioName: String) =
    http("370kb_" + scenarioName)
      .post("/pulse/swift/upload")
      .queryParam("startTime", session => System.currentTimeMillis())
      .bodyPart(RawFileBodyPart("file", "src/gatling/simulations/370kb.png"))
      .asMultipartForm

  // One single-request scenario per name; the three are identical except for
  // their naming, so build them from a shared helper instead of copy-paste.
  private def uploadScenario(name: String) = scenario(name).exec(upload(name))

  val scn  = uploadScenario("only1_1in10")
  val scn2 = uploadScenario("only1_1in10_2")
  val scn3 = uploadScenario("only1_1in10_3")

  setUp(
    // 1 new user per second for 10 seconds in EACH scenario, concurrently.
    scn.inject(constantUsersPerSec(1) during (10)),
    scn2.inject(constantUsersPerSec(1) during (10)),
    scn3.inject(constantUsersPerSec(1) during (10))
  ).protocols(httpConf)
    .assertions(forAll.failedRequests.percent.is(0))
}

enter image description here

enter image description here

enter image description here

Separating the requests into different scenarios gives similar response times. However, putting all the requests in the same scenario gives me slower response times for the first group but better response times for subsequent groups. Can someone help me understand this behavior?

0

There are 0 best solutions below