~drizzle-trunk/drizzle/development

Viewing changes to drizzled/slave.cc

  • Committer: Monty Taylor
  • Date: 2008-10-02 01:31:53 UTC
  • mfrom: (398.1.6 codestyle-new)
  • Revision ID: monty@inaugust.com-20081002013153-b097om921cd3j1pn
Merged from stdint-includes-fix.


@@ -1297,13 +1297,13 @@
         slave is 2. At SHOW SLAVE STATUS time, assume that the difference
         between timestamp of slave and rli->last_master_timestamp is 0
         (i.e. they are in the same second), then we get 0-(2-1)=-1 as a result.
-        This confuses users, so we don't go below 0: hence the max().
+        This confuses users, so we don't go below 0: hence the cmax().
 
         last_master_timestamp == 0 (an "impossible" timestamp 1970) is a
         special marker to say "consider we have caught up".
       */
       protocol->store((int64_t)(mi->rli.last_master_timestamp ?
                                  max((long)0, time_diff) : 0));
@@ removed line above replaced by the one below @@
-                                 max((long)0, time_diff) : 0));
+                                 cmax((long)0, time_diff) : 0));
     }
     else
     {
@@ -1829,7 +1829,7 @@
             exec_res= 0;
             end_trans(thd, ROLLBACK);
             /* chance for concurrent connection to get more locks */
-            safe_sleep(thd, min(rli->trans_retries, (uint32_t)MAX_SLAVE_RETRY_PAUSE),
+            safe_sleep(thd, cmin(rli->trans_retries, (uint32_t)MAX_SLAVE_RETRY_PAUSE),
                        (CHECK_KILLED_FUNC)sql_slave_killed, (void*)rli);
             pthread_mutex_lock(&rli->data_lock); // because of SHOW STATUS
             rli->trans_retries++;
@@ -2178,10 +2178,10 @@
   sql_print_information(_("Slave I/O thread exiting, read up to log '%s', "
                           "position %s"),
                         IO_RPL_LOG_NAME, llstr(mi->master_log_pos,llbuff));
-  VOID(pthread_mutex_lock(&LOCK_thread_count));
+  pthread_mutex_lock(&LOCK_thread_count);
   thd->query = thd->db = 0; // extra safety
   thd->query_length= thd->db_length= 0;
-  VOID(pthread_mutex_unlock(&LOCK_thread_count));
+  pthread_mutex_unlock(&LOCK_thread_count);
   if (drizzle)
   {
     /*
@@ -2447,7 +2447,7 @@
     must "proactively" clear playgrounds:
   */
   rli->cleanup_context(thd, 1);
-  VOID(pthread_mutex_lock(&LOCK_thread_count));
+  pthread_mutex_lock(&LOCK_thread_count);
   /*
     Some extra safety, which should not been needed (normally, event deletion
     should already have done these assignments (each event which sets these
@@ -2455,7 +2455,7 @@
   */
   thd->query= thd->db= thd->catalog= 0;
   thd->query_length= thd->db_length= 0;
-  VOID(pthread_mutex_unlock(&LOCK_thread_count));
+  pthread_mutex_unlock(&LOCK_thread_count);
   thd_proc_info(thd, "Waiting for slave mutex on exit");
   pthread_mutex_lock(&rli->run_lock);
   /* We need data_lock, at least to wake up any waiting master_pos_wait() */
@@ -3305,7 +3305,7 @@
     relay_log_pos       Current log pos
     pending             Number of bytes already processed from the event
   */
-  rli->event_relay_log_pos= max(rli->event_relay_log_pos, (uint64_t)BIN_LOG_HEADER_SIZE);
+  rli->event_relay_log_pos= cmax(rli->event_relay_log_pos, (uint64_t)BIN_LOG_HEADER_SIZE);
   my_b_seek(cur_log,rli->event_relay_log_pos);
   return(cur_log);
 }
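The recurring substitution in this diff swaps the min()/max() macros for cmin()/cmax(). The sketch below is a hypothetical illustration, not the actual Drizzle headers: it assumes cmin()/cmax() behave as plain template equivalents of std::min()/std::max(), and shows the two clamping roles they play above. The negative time_diff value, the retry count, and the MAX_SLAVE_RETRY_PAUSE value of 5 are assumptions used only for the example.

/* Hypothetical sketch: cmin()/cmax() as simple templates standing in for
   the min()/max() macros they replace in this diff. */
#include <cassert>
#include <cstdint>

template <class T>
inline const T& cmax(const T& a, const T& b) { return (a < b) ? b : a; }

template <class T>
inline const T& cmin(const T& a, const T& b) { return (b < a) ? b : a; }

int main()
{
  /* Seconds_Behind_Master: a one-second skew between the slave clock and
     last_master_timestamp can make time_diff come out as -1 (the 0-(2-1)=-1
     case in the comment above); clamp it to 0 so SHOW SLAVE STATUS never
     reports a negative value. */
  long time_diff= -1;                       /* assumed value for illustration */
  assert(cmax((long)0, time_diff) == 0);

  /* Retry pause: never sleep longer than MAX_SLAVE_RETRY_PAUSE seconds,
     no matter how many retries have accumulated. */
  const uint32_t MAX_SLAVE_RETRY_PAUSE= 5;  /* assumed constant for illustration */
  uint32_t trans_retries= 42;               /* assumed retry count */
  assert(cmin(trans_retries, MAX_SLAVE_RETRY_PAUSE) == MAX_SLAVE_RETRY_PAUSE);

  return 0;
}

Presumably the point of the renamed helpers is that template functions evaluate each argument exactly once and respect C++ types, avoiding the double-evaluation pitfalls of the old min()/max() macros.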