Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f18f8fe8 authored by PO HUNG CHEN's avatar PO HUNG CHEN Committed by Android (Google) Code Review
Browse files

Merge "Refine the binder latency test" into oc-dev

parents 0556d79e 60c513c5
Loading
Loading
Loading
Loading
+31 −12
Original line number Diff line number Diff line
@@ -40,7 +40,7 @@ vector<sp<IBinder> > workers;
// GOOD_SYNC_MIN is considered as good
// (comment truncated by the hunk boundary above; presumably a run whose
// deadline-meet ratio is >= GOOD_SYNC_MIN counts as "good" -- TODO confirm
// against the full source)
#define GOOD_SYNC_MIN (0.6)

// NOTE(review): two precision macros coexist here because this is scraped
// diff output without +/- markers: DUMP_PRICISION (old, typo) was replaced
// by DUMP_PRESICION (new, itself a misspelling of "precision") in this
// commit. Only DUMP_PRESICION is referenced by the post-change dump() code.
#define DUMP_PRICISION 3
#define DUMP_PRESICION 2

// debugfs mount point for ftrace; scheduling traces are read from here.
string trace_path = "/sys/kernel/debug/tracing";

@@ -246,10 +246,11 @@ struct Results {
    double worst = (double)m_worst / 1.0E6;
    double average = (double)m_total_time / m_transactions / 1.0E6;
    // FIXME: libjson?
    cout << std::setprecision(DUMP_PRICISION) << "{ \"avg\":" << setw(5) << left
         << average << ", \"wst\":" << setw(5) << left << worst
         << ", \"bst\":" << setw(5) << left << best << ", \"miss\":" << setw(5)
         << left << m_miss << ", \"meetR\":" << setw(3) << left
    int W = DUMP_PRESICION + 2;
    cout << setprecision(DUMP_PRESICION) << "{ \"avg\":" << setw(W) << left
         << average << ",\"wst\":" << setw(W) << left << worst
         << ",\"bst\":" << setw(W) << left << best << ",\"miss\":" << left
         << m_miss << ",\"meetR\":" << left << setprecision(DUMP_PRESICION + 3)
         << (1.0 - (double)m_miss / m_transactions) << "}";
  }
};
@@ -272,8 +273,15 @@ static void parcel_fill(Parcel& data, int sz, int priority, int cpu) {
  }
}

// Argument bundle handed to thread_start() through pthread_create, so the
// FIFO caller thread knows both where to record results and whom to call.
typedef struct {
  void* result;  // Results* accumulator; cast back in thread_start()
  int target;    // index into the global `workers` vector to transact with
} thread_priv_t;

static void* thread_start(void* p) {
  Results* results_fifo = (Results*)p;
  thread_priv_t* priv = (thread_priv_t*)p;
  int target = priv->target;
  Results* results_fifo = (Results*)priv->result;
  Parcel data, reply;
  Tick sta, end;

@@ -281,7 +289,7 @@ static void* thread_start(void* p) {
  thread_dump("fifo-caller");

  sta = tickNow();
  status_t ret = workers[0]->transact(BINDER_NOP, data, &reply);
  status_t ret = workers[target]->transact(BINDER_NOP, data, &reply);
  end = tickNow();
  results_fifo->add_time(tickNano(sta, end));

@@ -291,16 +299,19 @@ static void* thread_start(void* p) {
}

// create a fifo thread to transact and wait it to finished
static void thread_transaction(Results* results_fifo) {
static void thread_transaction(int target, Results* results_fifo) {
  thread_priv_t thread_priv;
  void* dummy;
  pthread_t thread;
  pthread_attr_t attr;
  struct sched_param param;
  thread_priv.target = target;
  thread_priv.result = results_fifo;
  ASSERT(!pthread_attr_init(&attr));
  ASSERT(!pthread_attr_setschedpolicy(&attr, SCHED_FIFO));
  param.sched_priority = sched_get_priority_max(SCHED_FIFO);
  ASSERT(!pthread_attr_setschedparam(&attr, &param));
  ASSERT(!pthread_create(&thread, &attr, &thread_start, results_fifo));
  ASSERT(!pthread_create(&thread, &attr, &thread_start, &thread_priv));
  ASSERT(!pthread_join(thread, &dummy));
}

@@ -316,7 +327,9 @@ void worker_fx(int num, int no_process, int iterations, int payload_size,
  sp<IServiceManager> serviceMgr = defaultServiceManager();
  sp<BinderWorkerService> service = new BinderWorkerService;
  serviceMgr->addService(generateServiceName(num), service);
  // init done
  p.signal();
  // wait for kick-off
  p.wait();

  // If client/server pairs, then half the workers are
@@ -338,7 +351,7 @@ void worker_fx(int num, int no_process, int iterations, int payload_size,
    int target = num % server_count;

    // 1. transaction by fifo thread
    thread_transaction(&results_fifo);
    thread_transaction(target, &results_fifo);
    parcel_fill(data, payload_size, thread_pri(), sched_getcpu());
    thread_dump("other-caller");

@@ -356,6 +369,7 @@ void worker_fx(int num, int no_process, int iterations, int payload_size,
  p.wait();

  p.send(&dummy);
  // wait for kill
  p.wait();
  // Client for each pair dump here
  if (is_client(num)) {
@@ -463,12 +477,17 @@ int main(int argc, char** argv) {
  for (int i = 0; i < no_process; i++) {
    pipes.push_back(make_process(i, iterations, no_process, payload_size));
  }
  // wait for init done
  wait_all(pipes);
  // kick-off iterations
  signal_all(pipes);
  // wait for completion
  wait_all(pipes);
  // start to send result
  signal_all(pipes);
  for (int i = 0; i < no_process; i++) {
    int status;
    // kill
    pipes[i].signal();
    wait(&status);
    // the exit status is number of transactions without priority inheritance