MT#55283 convert rtpe_now to int64_t

First step in converting (almost) all timeval uses to int64_t

Change-Id: If20fd749c61c278273c535b68287df7e6f042808
master
Richard Fuchs 2 weeks ago
parent d75e44829d
commit c70510303a

@ -28,15 +28,15 @@ static bool audio_player_run(struct media_player *mp) {
if (!ap || !ap->ptime_us)
return false;
ap->last_run = rtpe_now; // equals mp->next_run
ap->last_run = timeval_from_us(rtpe_now); // equals mp->next_run
unsigned int size;
void *buf = mix_buffer_read_fast(&ap->mb, ap->ptime, &size);
if (!buf) {
if (!size) {
// error or not active: just reschedule
timeval_add_usec(&mp->next_run, ap->ptime_us);
timerthread_obj_schedule_abs(&mp->tt_obj, &mp->next_run);
mp->next_run = timeval_add_usec(mp->next_run, ap->ptime_us);
timerthread_obj_schedule_abs(&mp->tt_obj, mp->next_run);
return false;
}
buf = g_alloca(size);
@ -167,11 +167,11 @@ void audio_player_start(struct call_media *m) {
ilogs(transcoding, LOG_DEBUG, "Starting audio player");
ap->last_run = rtpe_now;
ap->last_run = timeval_from_us(rtpe_now);
mp->next_run = rtpe_now;
timeval_add_usec(&mp->next_run, ap->ptime_us);
timerthread_obj_schedule_abs(&mp->tt_obj, &mp->next_run);
mp->next_run = timeval_from_us(rtpe_now);
mp->next_run = timeval_add_usec(mp->next_run, ap->ptime_us);
timerthread_obj_schedule_abs(&mp->tt_obj, mp->next_run);
}

@ -92,7 +92,7 @@ static int call_timer_delete_monologues(call_t *c) {
if (!ml->deleted)
continue;
if (ml->deleted > rtpe_now.tv_sec) {
if (ml->deleted > timeval_from_us(rtpe_now).tv_sec) {
if (!min_deleted || ml->deleted < min_deleted)
min_deleted = ml->deleted;
continue;
@ -142,7 +142,7 @@ static void call_timer_iterator(call_t *c, struct iterator_helper *hlp) {
// final timeout applicable to all calls (own and foreign)
if (atomic_get_na(&rtpe_config.final_timeout)
&& rtpe_now.tv_sec >= (c->created.tv_sec + atomic_get_na(&rtpe_config.final_timeout)))
&& timeval_from_us(rtpe_now).tv_sec >= (c->created.tv_sec + atomic_get_na(&rtpe_config.final_timeout)))
{
ilog(LOG_INFO, "Closing call due to final timeout");
tmp_t_reason = FINAL_TIMEOUT;
@ -160,11 +160,11 @@ static void call_timer_iterator(call_t *c, struct iterator_helper *hlp) {
goto out;
}
if (c->deleted && rtpe_now.tv_sec >= c->deleted
if (c->deleted && timeval_from_us(rtpe_now).tv_sec >= c->deleted
&& c->last_signal <= c->deleted)
goto delete;
if (c->ml_deleted && rtpe_now.tv_sec >= c->ml_deleted) {
if (c->ml_deleted && timeval_from_us(rtpe_now).tv_sec >= c->ml_deleted) {
if (call_timer_delete_monologues(c))
goto delete;
}
@ -175,7 +175,7 @@ static void call_timer_iterator(call_t *c, struct iterator_helper *hlp) {
// ignore media timeout if call was recently taken over
if (CALL_ISSET(c, FOREIGN_MEDIA)
&& rtpe_now.tv_sec - c->last_signal <= atomic_get_na(&rtpe_config.timeout))
&& timeval_from_us(rtpe_now).tv_sec - c->last_signal <= atomic_get_na(&rtpe_config.timeout))
goto out;
ice_fragments_cleanup(c->sdp_fragments, false);
@ -199,18 +199,18 @@ static void call_timer_iterator(call_t *c, struct iterator_helper *hlp) {
timestamp = atomic64_get_na(&ps->media->ice_agent->last_activity);
if (PS_ISSET(ps, RTP)) {
if (rtpe_now.tv_sec - atomic64_get_na(&ps->stats_in->last_packet) < 2) {
if (timeval_from_us(rtpe_now).tv_sec - atomic64_get_na(&ps->stats_in->last_packet) < 2) {
// kernel activity
if (rtpe_now.tv_sec - atomic64_get_na(&ps->last_packet) < 2)
if (timeval_from_us(rtpe_now).tv_sec - atomic64_get_na(&ps->last_packet) < 2)
hlp->user_kernel_streams++; // user activity
else
hlp->kernel_streams++;
}
else if (rtpe_now.tv_sec - atomic64_get_na(&ps->last_packet) < 2)
else if (timeval_from_us(rtpe_now).tv_sec - atomic64_get_na(&ps->last_packet) < 2)
hlp->user_streams++; // user activity
}
bool active_media = (rtpe_now.tv_sec - packet_stream_last_packet(ps) < 1);
bool active_media = (timeval_from_us(rtpe_now).tv_sec - packet_stream_last_packet(ps) < 1);
if (active_media)
CALL_CLEAR(sfd->call, FOREIGN_MEDIA);
@ -219,7 +219,7 @@ static void call_timer_iterator(call_t *c, struct iterator_helper *hlp) {
if (!ctx)
break;
if (rtpe_now.tv_sec - atomic64_get_na(&ctx->stats->last_packet) < 2)
if (timeval_from_us(rtpe_now).tv_sec - atomic64_get_na(&ctx->stats->last_packet) < 2)
payload_tracker_add(&ctx->tracker,
atomic_get_na(&ctx->stats->last_pt));
}
@ -228,7 +228,7 @@ static void call_timer_iterator(call_t *c, struct iterator_helper *hlp) {
if (!ctx)
break;
if (rtpe_now.tv_sec - atomic64_get_na(&ctx->stats->last_packet) < 2)
if (timeval_from_us(rtpe_now).tv_sec - atomic64_get_na(&ctx->stats->last_packet) < 2)
payload_tracker_add(&ctx->tracker,
atomic_get_na(&ctx->stats->last_pt));
}
@ -249,7 +249,7 @@ no_sfd:
tmp_t_reason = OFFER_TIMEOUT;
}
if (timestamp > rtpe_now.tv_sec || rtpe_now.tv_sec - timestamp < check)
if (timestamp > timeval_from_us(rtpe_now).tv_sec || timeval_from_us(rtpe_now).tv_sec - timestamp < check)
good = true;
next:
@ -272,7 +272,7 @@ next:
goto out;
// update every 5 minutes
if (has_srtp && rtpe_now.tv_sec - atomic64_get_na(&c->last_redis_update) > 60*5)
if (has_srtp && timeval_from_us(rtpe_now).tv_sec - atomic64_get_na(&c->last_redis_update) > 60*5)
do_update = true;
goto out;
@ -976,7 +976,7 @@ struct packet_stream *__packet_stream_new(call_t *call) {
mutex_init(&stream->in_lock);
mutex_init(&stream->out_lock);
stream->call = call;
atomic64_set_na(&stream->last_packet, rtpe_now.tv_sec);
atomic64_set_na(&stream->last_packet, timeval_from_us(rtpe_now).tv_sec);
stream->rtp_stats = rtp_stats_ht_new();
recording_init_stream(stream);
stream->send_timer = send_timer_new(stream);
@ -1019,7 +1019,7 @@ static void __fill_stream(struct packet_stream *ps, const struct endpoint *epp,
struct endpoint ep;
struct call_media *media = ps->media;
atomic64_set_na(&ps->last_packet, rtpe_now.tv_sec);
atomic64_set_na(&ps->last_packet, timeval_from_us(rtpe_now).tv_sec);
ep = *epp;
ep.port += port_off;
@ -2721,7 +2721,7 @@ static void __call_monologue_init_from_flags(struct call_monologue *ml, struct c
{
call_t *call = ml->call;
call->last_signal = rtpe_now.tv_sec;
call->last_signal = timeval_from_us(rtpe_now).tv_sec;
call->deleted = 0;
call->media_rec_slots = (flags->media_rec_slots > 0 && call->media_rec_slots == 0)
? flags->media_rec_slots
@ -3950,7 +3950,7 @@ out:
void add_total_calls_duration_in_interval(struct timeval *interval_tv) {
struct timeval ongoing_calls_dur = add_ongoing_calls_dur_in_interval(
&rtpe_latest_graphite_interval_start, interval_tv);
RTPE_STATS_ADD(total_calls_duration_intv, timeval_us(&ongoing_calls_dur));
RTPE_STATS_ADD(total_calls_duration_intv, timeval_us(ongoing_calls_dur));
}
static struct timeval add_ongoing_calls_dur_in_interval(struct timeval *interval_start,
@ -3965,10 +3965,10 @@ static struct timeval add_ongoing_calls_dur_in_interval(struct timeval *interval
goto next;
ml = call->monologues.head->data;
if (timercmp(interval_start, &ml->started, >)) {
res = timeval_add(&res, interval_duration);
res = timeval_add(res, *interval_duration);
} else {
call_duration = timeval_subtract(&rtpe_now, &ml->started);
res = timeval_add(&res, &call_duration);
call_duration = timeval_subtract(timeval_from_us(rtpe_now), ml->started);
res = timeval_add(res, call_duration);
}
next:
;
@ -4094,8 +4094,8 @@ void call_destroy(call_t *c) {
ml->label.s ? " (label '" : "",
STR_FMT(ml->label.s ? &ml->label : &STR_EMPTY),
ml->label.s ? "')" : "",
(unsigned int) (rtpe_now.tv_sec - ml->created) / 60,
(unsigned int) (rtpe_now.tv_sec - ml->created) % 60,
(unsigned int) (timeval_from_us(rtpe_now).tv_sec - ml->created) / 60,
(unsigned int) (timeval_from_us(rtpe_now).tv_sec - ml->created) % 60,
STR_FMT_M(&ml->viabranch));
for (__auto_type alias = ml->tag_aliases.head; alias; alias = alias->next)
@ -4176,7 +4176,7 @@ void call_destroy(call_t *c) {
atomic64_get_na(&ps->stats_in->packets),
atomic64_get_na(&ps->stats_in->bytes),
atomic64_get_na(&ps->stats_in->errors),
rtpe_now.tv_sec - packet_stream_last_packet(ps),
timeval_from_us(rtpe_now).tv_sec - packet_stream_last_packet(ps),
atomic64_get_na(&ps->stats_out->packets),
atomic64_get_na(&ps->stats_out->bytes),
atomic64_get_na(&ps->stats_out->errors));
@ -4200,12 +4200,12 @@ void call_destroy(call_t *c) {
se->average_mos.mos / mos_samples % 10,
se->lowest_mos->mos / 10,
se->lowest_mos->mos % 10,
(unsigned int) (timeval_diff(&se->lowest_mos->reported, &c->created) / 1000000) / 60,
(unsigned int) (timeval_diff(&se->lowest_mos->reported, &c->created) / 1000000) % 60,
(unsigned int) (timeval_diff(se->lowest_mos->reported, c->created) / 1000000) / 60,
(unsigned int) (timeval_diff(se->lowest_mos->reported, c->created) / 1000000) % 60,
se->highest_mos->mos / 10,
se->highest_mos->mos % 10,
(unsigned int) (timeval_diff(&se->highest_mos->reported, &c->created) / 1000000) / 60,
(unsigned int) (timeval_diff(&se->highest_mos->reported, &c->created) / 1000000) % 60,
(unsigned int) (timeval_diff(se->highest_mos->reported, c->created) / 1000000) / 60,
(unsigned int) (timeval_diff(se->highest_mos->reported, c->created) / 1000000) % 60,
(unsigned int) se->packets_lost);
ilog(LOG_INFO, "------ respective (avg/min/max) jitter %" PRIu64 "/%" PRIu64 "/%" PRIu64 " ms, "
"RTT-e2e %" PRIu64 ".%" PRIu64 "/%" PRIu64 ".%" PRIu64
@ -4389,7 +4389,7 @@ static call_t *call_create(const str *callid) {
c->labels = labels_ht_new();
call_memory_arena_set(c);
c->callid = call_str_cpy(callid);
c->created = rtpe_now;
c->created = timeval_from_us(rtpe_now);
c->dtls_cert = dtls_cert();
c->tos = rtpe_config.default_tos;
c->poller = rtpe_get_poller();
@ -4680,7 +4680,7 @@ struct call_monologue *__monologue_create(call_t *call) {
ret = uid_alloc(&call->monologues);
ret->call = call;
ret->created = rtpe_now.tv_sec;
ret->created = timeval_from_us(rtpe_now).tv_sec;
ret->associated_tags = g_hash_table_new(g_direct_hash, g_direct_equal);
ret->medias = medias_arr_new();
ret->media_ids = media_id_ht_new();
@ -4898,7 +4898,7 @@ static bool monologue_delete_iter(struct call_monologue *a, int delete_delay) {
ilog(LOG_INFO, "Scheduling deletion of call branch '" STR_FORMAT_M "' "
"(via-branch '" STR_FORMAT_M "') in %d seconds",
STR_FMT_M(&a->tag), STR_FMT0_M(&a->viabranch), delete_delay);
a->deleted = rtpe_now.tv_sec + delete_delay;
a->deleted = timeval_from_us(rtpe_now).tv_sec + delete_delay;
if (!call->ml_deleted || call->ml_deleted > a->deleted)
call->ml_deleted = a->deleted;
}
@ -5321,7 +5321,7 @@ int call_delete_branch(call_t *c, const str *branch,
}
do_delete:
c->destroyed = rtpe_now;
c->destroyed = timeval_from_us(rtpe_now);
/* stop media player and all medias of ml.
* same for media subscribers */
@ -5354,11 +5354,11 @@ del_all:
monologue_stop(ml, false);
}
c->destroyed = rtpe_now;
c->destroyed = timeval_from_us(rtpe_now);
if (delete_delay > 0) {
ilog(LOG_INFO, "Scheduling deletion of entire call in %d seconds", delete_delay);
c->deleted = rtpe_now.tv_sec + delete_delay;
c->deleted = timeval_from_us(rtpe_now).tv_sec + delete_delay;
rwlock_unlock_w(&c->master_lock);
}
else {

@ -420,7 +420,7 @@ str call_query_udp(char **out) {
rwlock_unlock_w(&c->master_lock);
ret = str_sprintf("%s %lld %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", out[RE_UDP_COOKIE],
(long long int) atomic_get_na(&rtpe_config.silent_timeout) - (rtpe_now.tv_sec - stats.last_packet),
(long long int) atomic_get_na(&rtpe_config.silent_timeout) - (timeval_from_us(rtpe_now).tv_sec - stats.last_packet),
atomic64_get_na(&stats.totals[0].packets), atomic64_get_na(&stats.totals[1].packets),
atomic64_get_na(&stats.totals[2].packets), atomic64_get_na(&stats.totals[3].packets));
goto out;
@ -446,9 +446,9 @@ static void call_status_iterator(call_t *c, struct streambuf_stream *s) {
// mutex_lock(&c->master_lock);
streambuf_printf(s->outbuf, "session "STR_FORMAT" - - - - %lli\n",
streambuf_printf(s->outbuf, "session "STR_FORMAT" - - - - %" PRId64 "\n",
STR_FMT(&c->callid),
timeval_diff(&rtpe_now, &c->created) / 1000000);
timeval_diff(timeval_from_us(rtpe_now), c->created) / 1000000);
/* XXX restore function */

@ -60,7 +60,7 @@ void cdr_update_entry(call_t * c) {
ml->term_reason = UNKNOWN;
}
tim_result_duration = timeval_subtract(&ml->terminated, &ml->started);
tim_result_duration = timeval_subtract(ml->terminated, ml->started);
if (_log_facility_cdr) {
g_string_append_printf(cdr,

@ -726,7 +726,7 @@ static void cli_list_tag_info(struct cli_writer *cw, struct call_monologue *ml)
else
now = ml->terminated;
tim_result_duration = timeval_subtract(&now, &ml->started);
tim_result_duration = timeval_subtract(now, ml->started);
cw->cw_printf(cw, "--- Tag '" STR_FORMAT "', type: %s, label '" STR_FORMAT "', "
"branch '" STR_FORMAT "', "
@ -1248,7 +1248,7 @@ static void cli_incoming_active_standby(struct cli_writer *cw, bool foreign) {
ITERATE_CALL_LIST_START(CALL_ITERATOR_MAIN, c);
rwlock_lock_w(&c->master_lock);
call_make_own_foreign(c, foreign);
c->last_signal = MAX(c->last_signal, rtpe_now.tv_sec);
c->last_signal = MAX(c->last_signal, timeval_from_us(rtpe_now).tv_sec);
if (!foreign) {
CALL_SET(c, FOREIGN_MEDIA); // ignore timeout until we have media
c->last_signal++; // we are authoritative now
@ -1826,7 +1826,7 @@ static void cli_incoming_list_transcoders(str *instr, struct cli_writer *cw, con
if (t_hash_table_size(rtpe_codec_stats) == 0)
cw->cw_printf(cw, "No stats entries\n");
else {
int last_tv_sec = rtpe_now.tv_sec - 1;
int last_tv_sec = timeval_from_us(rtpe_now).tv_sec - 1;
unsigned int idx = last_tv_sec & 1;
codec_stats_ht_iter iter;
@ -1885,10 +1885,10 @@ static void cli_incoming_media_list_files(str *instr, struct cli_writer *cw, con
str *name = t_queue_pop_head(&list);
time_t atime, mtime;
if (media_player_get_file_times(name, &mtime, &atime))
cw->cw_printf(cw, STR_FORMAT ", loaded %lu s ago, last used %lu s ago\n",
cw->cw_printf(cw, STR_FORMAT ", loaded %" PRId64 " s ago, last used %" PRId64 " s ago\n",
STR_FMT(name),
(long) rtpe_now.tv_sec - mtime,
(long) rtpe_now.tv_sec - atime);
timeval_from_us(rtpe_now).tv_sec - mtime,
timeval_from_us(rtpe_now).tv_sec - atime);
str_free(name);
}
}
@ -1900,9 +1900,9 @@ static void cli_incoming_media_list_dbs(str *instr, struct cli_writer *cw, const
unsigned long long id = GPOINTER_TO_UINT(idp);
time_t atime, mtime;
if (media_player_get_db_times(id, &mtime, &atime))
cw->cw_printf(cw, "%llu, loaded %lu s ago, last used %lu s ago\n", id,
(long) rtpe_now.tv_sec - mtime,
(long) rtpe_now.tv_sec - atime);
cw->cw_printf(cw, "%llu, loaded %" PRId64 " s ago, last used %" PRId64 " s ago\n", id,
timeval_from_us(rtpe_now).tv_sec - mtime,
timeval_from_us(rtpe_now).tv_sec - atime);
}
}
@ -2071,8 +2071,8 @@ static void cli_incoming_media_list_caches(str *instr, struct cli_writer *cw, co
time_t atime, mtime;
if (media_player_get_cache_times(id, &mtime, &atime))
cw->cw_printf(cw, "%llu, loaded %lu s ago, last used %lu s ago\n", id,
(long) rtpe_now.tv_sec - mtime,
(long) rtpe_now.tv_sec - atime);
(long) timeval_from_us(rtpe_now).tv_sec - mtime,
(long) timeval_from_us(rtpe_now).tv_sec - atime);
}
}

@ -1284,12 +1284,12 @@ static void __codec_rtcp_timer_schedule(struct call_media *media) {
rt->ct.tt_obj.tt = &codec_timers_thread;
rt->call = obj_get(media->call);
rt->media = media;
rt->ct.next = rtpe_now;
rt->ct.next = timeval_from_us(rtpe_now);
rt->ct.timer_func = __rtcp_timer_run;
}
timeval_add_usec(&rt->ct.next, rtpe_config.rtcp_interval * 1000 + (ssl_random() % 1000000));
timerthread_obj_schedule_abs(&rt->ct.tt_obj, &rt->ct.next);
rt->ct.next = timeval_add_usec(rt->ct.next, rtpe_config.rtcp_interval * 1000 + (ssl_random() % 1000000));
timerthread_obj_schedule_abs(&rt->ct.tt_obj, rt->ct.next);
}
// no lock held
static void __rtcp_timer_run(struct codec_timer *ct) {
@ -1978,8 +1978,8 @@ static void __mqtt_timer_run_summary(struct codec_timer *ct) {
mqtt_timer_run_summary();
}
static void __codec_mqtt_timer_schedule(struct mqtt_timer *mqt) {
timeval_add_usec(&mqt->ct.next, rtpe_config.mqtt_publish_interval * 1000);
timerthread_obj_schedule_abs(&mqt->ct.tt_obj, &mqt->ct.next);
mqt->ct.next = timeval_add_usec(mqt->ct.next, rtpe_config.mqtt_publish_interval * 1000);
timerthread_obj_schedule_abs(&mqt->ct.tt_obj, mqt->ct.next);
}
// master lock held in W
void mqtt_timer_start(struct mqtt_timer **mqtp, call_t *call, struct call_media *media) {
@ -1991,7 +1991,7 @@ void mqtt_timer_start(struct mqtt_timer **mqtp, call_t *call, struct call_media
mqt->call = call ? obj_get(call) : NULL;
mqt->self = mqtp;
mqt->media = media;
mqt->ct.next = rtpe_now;
mqt->ct.next = timeval_from_us(rtpe_now);
if (media)
mqt->ct.timer_func = __mqtt_timer_run_media;
@ -2117,7 +2117,7 @@ static int handler_func_passthrough(struct codec_handler *h, struct media_packet
uint32_t ts = 0;
if (mp->rtp) {
ts = ntohl(mp->rtp->timestamp);
codec_calc_jitter(mp->ssrc_in, ts, h->source_pt.clock_rate, &mp->tv);
codec_calc_jitter(mp->ssrc_in, ts, h->source_pt.clock_rate, mp->tv);
codec_calc_lost(mp->ssrc_in, ntohs(mp->rtp->seq_num));
if (ML_ISSET(mp->media->monologue, BLOCK_SHORT) && h->source_pt.codec_def
@ -2401,15 +2401,15 @@ void codec_output_rtp(struct media_packet *mp, struct codec_scheduler *csch,
ssrc_ctx_hold(ssrc_out);
p->ssrc_out = ssrc_out;
long long ts_diff_us = 0;
int64_t ts_diff_us = 0;
gettimeofday(&rtpe_now, NULL);
rtpe_now = now_us();
// ignore scheduling if a sequence number was supplied. in that case we're just doing
// passthrough forwarding (or are handling some other prepared RTP stream) and want
// to send the packet out immediately.
if (seq != -1) {
p->ttq_entry.when = rtpe_now;
p->ttq_entry.when = timeval_from_us(rtpe_now);
goto send;
}
@ -2420,40 +2420,40 @@ void codec_output_rtp(struct media_packet *mp, struct codec_scheduler *csch,
p->ttq_entry.when = csch->first_send;
uint32_t ts_diff = (uint32_t) ts - (uint32_t) csch->first_send_ts; // allow for wrap-around
ts_diff += ts_delay;
ts_diff_us = (unsigned long long) ts_diff * 1000000 / handler->dest_pt.clock_rate;
timeval_add_usec(&p->ttq_entry.when, ts_diff_us);
ts_diff_us = ts_diff * 1000000LL / handler->dest_pt.clock_rate;
p->ttq_entry.when = timeval_add_usec(p->ttq_entry.when, ts_diff_us);
// how far in the future is this?
ts_diff_us = timeval_diff(&p->ttq_entry.when, &rtpe_now);
ts_diff_us = timeval_diff(p->ttq_entry.when, timeval_from_us(rtpe_now));
if (ts_diff_us > 1000000 || ts_diff_us < -1000000) // more than one second, can't be right
csch->first_send.tv_sec = 0; // fix it up below
}
if (!csch->first_send.tv_sec || !p->ttq_entry.when.tv_sec) {
p->ttq_entry.when = csch->first_send = rtpe_now;
p->ttq_entry.when = csch->first_send = timeval_from_us(rtpe_now);
csch->first_send_ts = ts;
}
ts_diff_us = timeval_diff(&p->ttq_entry.when, &rtpe_now);
ts_diff_us = timeval_diff(p->ttq_entry.when, timeval_from_us(rtpe_now));
csch->output_skew = csch->output_skew * 15 / 16 + ts_diff_us / 16;
if (csch->output_skew > 50000 && ts_diff_us > 10000) { // arbitrary value, 50 ms, 10 ms shift
ilogs(transcoding, LOG_DEBUG, "Steady clock skew of %li.%01li ms detected, shifting send timer back by 10 ms",
csch->output_skew / 1000,
(csch->output_skew % 1000) / 100);
timeval_add_usec(&p->ttq_entry.when, -10000);
p->ttq_entry.when = timeval_add_usec(p->ttq_entry.when, -10000);
csch->output_skew -= 10000;
csch->first_send_ts += handler->dest_pt.clock_rate / 100;
ts_diff_us = timeval_diff(&p->ttq_entry.when, &rtpe_now);
ts_diff_us = timeval_diff(p->ttq_entry.when, timeval_from_us(rtpe_now));
}
else if (ts_diff_us < 0) {
ts_diff_us *= -1;
ilogs(transcoding, LOG_DEBUG, "Negative clock skew of %lli.%01lli ms detected, shifting send timer forward",
ilogs(transcoding, LOG_DEBUG, "Negative clock skew of %" PRId64 ".%01" PRId64 " ms detected, shifting send timer forward",
ts_diff_us / 1000,
(ts_diff_us % 1000) / 100);
timeval_add_usec(&p->ttq_entry.when, ts_diff_us);
p->ttq_entry.when = timeval_add_usec(p->ttq_entry.when, ts_diff_us);
csch->output_skew = 0;
csch->first_send_ts -= (long long) handler->dest_pt.clock_rate * ts_diff_us / 1000000;
ts_diff_us = timeval_diff(&p->ttq_entry.when, &rtpe_now); // should be 0 now
ts_diff_us = timeval_diff(p->ttq_entry.when, timeval_from_us(rtpe_now)); // should be 0 now
}
send:
@ -2953,7 +2953,7 @@ static int handler_func_passthrough_ssrc(struct codec_handler *h, struct media_p
return 0;
uint32_t ts = ntohl(mp->rtp->timestamp);
codec_calc_jitter(mp->ssrc_in, ts, h->source_pt.clock_rate, &mp->tv);
codec_calc_jitter(mp->ssrc_in, ts, h->source_pt.clock_rate, mp->tv);
codec_calc_lost(mp->ssrc_in, ntohs(mp->rtp->seq_num));
// save original payload in case DTMF mangles it
@ -3110,17 +3110,17 @@ static int codec_decoder_event(enum codec_event event, void *ptr, void *data) {
case CE_AMR_CMR_RECV:
// ignore locking and races for this
media->encoder_callback.amr.cmr_in = GPOINTER_TO_UINT(ptr);
media->encoder_callback.amr.cmr_in_ts = rtpe_now;
media->encoder_callback.amr.cmr_in_ts = timeval_from_us(rtpe_now);
break;
case CE_AMR_SEND_CMR:
// ignore locking and races for this
media->encoder_callback.amr.cmr_out = GPOINTER_TO_UINT(ptr);
media->encoder_callback.amr.cmr_out_ts = rtpe_now;
media->encoder_callback.amr.cmr_out_ts = timeval_from_us(rtpe_now);
break;
case CE_EVS_CMR_RECV:
// ignore locking and races for this
media->encoder_callback.evs.cmr_in = GPOINTER_TO_UINT(ptr);
media->encoder_callback.evs.cmr_in_ts = rtpe_now;
media->encoder_callback.evs.cmr_in_ts = timeval_from_us(rtpe_now);
break;
default:
break;
@ -3138,9 +3138,9 @@ static void __delay_buffer_schedule(struct delay_buffer *dbuf) {
return;
struct timeval to_run = dframe->mp.tv;
timeval_add_usec(&to_run, dbuf->delay * 1000);
to_run = timeval_add_usec(to_run, dbuf->delay * 1000);
dbuf->ct.next = to_run;
timerthread_obj_schedule_abs(&dbuf->ct.tt_obj, &dbuf->ct.next);
timerthread_obj_schedule_abs(&dbuf->ct.tt_obj, dbuf->ct.next);
}
static bool __buffer_delay_do_direct(struct delay_buffer *dbuf) {
@ -3153,7 +3153,7 @@ static bool __buffer_delay_do_direct(struct delay_buffer *dbuf) {
}
static int delay_frame_cmp(const struct delay_frame *a, const struct delay_frame *b, void *ptr) {
return -1 * timeval_cmp(&a->mp.tv, &b->mp.tv);
return -1 * timeval_cmp(a->mp.tv, b->mp.tv);
}
INLINE struct codec_ssrc_handler *ssrc_handler_get(struct codec_ssrc_handler *ch) {
@ -3315,7 +3315,7 @@ static bool __buffer_dtx(struct dtx_buffer *dtxb, struct codec_ssrc_handler *dec
mutex_lock(&dtxb->lock);
dtxb->start = rtpe_now.tv_sec;
dtxb->start = timeval_from_us(rtpe_now).tv_sec;
t_queue_push_tail(&dtxb->packets, dtxp);
ilogs(dtx, LOG_DEBUG, "Adding packet (TS %lu) to DTX buffer; now %i packets in DTX queue",
ts, dtxb->packets.length);
@ -3325,8 +3325,8 @@ static bool __buffer_dtx(struct dtx_buffer *dtxb, struct codec_ssrc_handler *dec
if (!dtxb->ssrc)
dtxb->ssrc = mp->ssrc_in->parent->h.ssrc;
dtxb->ct.next = mp->tv;
timeval_add_usec(&dtxb->ct.next, rtpe_config.dtx_delay * 1000);
timerthread_obj_schedule_abs(&dtxb->ct.tt_obj, &dtxb->ct.next);
dtxb->ct.next = timeval_add_usec(dtxb->ct.next, rtpe_config.dtx_delay * 1000);
timerthread_obj_schedule_abs(&dtxb->ct.tt_obj, dtxb->ct.next);
}
// packet now consumed if there was one
@ -3629,7 +3629,7 @@ static bool __dtx_drift_shift(struct dtx_buffer *dtxb, unsigned long ts,
"(%li ms < %i ms), "
"pushing DTX timer forward my %i ms",
tv_diff / 1000, rtpe_config.dtx_delay, rtpe_config.dtx_shift);
timeval_add_usec(&dtxb->ct.next, rtpe_config.dtx_shift * 1000);
dtxb->ct.next = timeval_add_usec(dtxb->ct.next, rtpe_config.dtx_shift * 1000);
}
else if (ts_diff < dtxb->tspp) {
// TS underflow
@ -3642,7 +3642,7 @@ static bool __dtx_drift_shift(struct dtx_buffer *dtxb, unsigned long ts,
"(TS %lu, diff %li), "
"pushing DTX timer forward by %i ms and discarding packet",
ts, ts_diff, rtpe_config.dtx_shift);
timeval_add_usec(&dtxb->ct.next, rtpe_config.dtx_shift * 1000);
dtxb->ct.next = timeval_add_usec(dtxb->ct.next, rtpe_config.dtx_shift * 1000);
discard = true;
}
}
@ -3656,7 +3656,7 @@ static bool __dtx_drift_shift(struct dtx_buffer *dtxb, unsigned long ts,
ilogs(dtx, LOG_DEBUG, "DTX timer queue overflowing (%i packets in queue, "
"%lli ms delay), speeding up DTX timer by %i ms",
dtxb->packets.length, ts_diff_us / 1000, rtpe_config.dtx_shift);
timeval_add_usec(&dtxb->ct.next, rtpe_config.dtx_shift * -1000);
dtxb->ct.next = timeval_add_usec(dtxb->ct.next, rtpe_config.dtx_shift * -1000);
}
}
@ -3712,7 +3712,7 @@ static void __dtx_send_later(struct codec_timer *ct) {
int ret = 0;
unsigned long ts;
int p_left = 0;
long tv_diff = -1, ts_diff = 0;
int64_t tv_diff = -1, ts_diff = 0;
mutex_lock(&dtxb->lock);
@ -3769,7 +3769,7 @@ static void __dtx_send_later(struct codec_timer *ct) {
ts = dtxb->head_ts = dtxp->packet->ts;
else
ts = dtxb->head_ts;
tv_diff = timeval_diff(&rtpe_now, &mp_copy.tv);
tv_diff = timeval_diff(timeval_from_us(rtpe_now), mp_copy.tv);
}
else {
// no packet ready to decode: DTX
@ -3921,7 +3921,7 @@ static void __dtx_send_later(struct codec_timer *ct) {
"Decoder error while processing buffered RTP packet");
}
else {
int diff = rtpe_now.tv_sec - dtxb_start;
int diff = timeval_from_us(rtpe_now).tv_sec - dtxb_start;
if (rtpe_config.max_dtx <= 0 || diff < rtpe_config.max_dtx) {
ilogs(dtx, LOG_DEBUG, "RTP media for TS %lu missing, triggering DTX", ts);
@ -3954,8 +3954,8 @@ static void __dtx_send_later(struct codec_timer *ct) {
}
// schedule next run
timeval_add_usec(&dtxb->ct.next, dtxb->ptime * 1000);
timerthread_obj_schedule_abs(&dtxb->ct.tt_obj, &dtxb->ct.next);
dtxb->ct.next = timeval_add_usec(dtxb->ct.next, dtxb->ptime * 1000);
timerthread_obj_schedule_abs(&dtxb->ct.tt_obj, dtxb->ct.next);
mutex_unlock(&dtxb->lock);
@ -4215,7 +4215,7 @@ static void async_chain_finish(AVPacket *pkt, void *async_cb_obj) {
struct transcode_job *j = async_cb_obj;
struct call *call = j->mp.call;
gettimeofday(&rtpe_now, NULL);
rtpe_now = now_us();
if (pkt) {
rwlock_lock_r(&call->master_lock);
@ -4615,7 +4615,7 @@ static int packet_decoded_common(decoder_t *decoder, AVFrame *frame, void *u1, v
struct codec_handler *h = ch->handler;
if (h->stats_entry) {
int idx = rtpe_now.tv_sec & 1;
int idx = timeval_from_us(rtpe_now).tv_sec & 1;
atomic64_add(&h->stats_entry->pcm_samples[idx], frame->nb_samples);
atomic64_add(&h->stats_entry->pcm_samples[2], frame->nb_samples);
}
@ -4827,7 +4827,7 @@ void codec_update_all_source_handlers(struct call_monologue *ml, const sdp_ng_fl
void codec_calc_jitter(struct ssrc_ctx *ssrc, unsigned long ts, unsigned int clockrate,
const struct timeval *tv)
const struct timeval tv)
{
if (!ssrc || !clockrate)
return;
@ -4927,14 +4927,14 @@ static int handler_func_transcode(struct codec_handler *h, struct media_packet *
ntohl(mp->rtp->timestamp), mp->payload.len);
codec_calc_jitter(mp->ssrc_in, ntohl(mp->rtp->timestamp), h->input_handler->source_pt.clock_rate,
&mp->tv);
mp->tv);
if (h->stats_entry) {
unsigned int idx = rtpe_now.tv_sec & 1;
unsigned int idx = timeval_from_us(rtpe_now).tv_sec & 1;
int last_tv_sec = atomic_get_na(&h->stats_entry->last_tv_sec[idx]);
if (last_tv_sec != (int) rtpe_now.tv_sec) {
if (last_tv_sec != (int) timeval_from_us(rtpe_now).tv_sec) {
if (g_atomic_int_compare_and_exchange(&h->stats_entry->last_tv_sec[idx],
last_tv_sec, rtpe_now.tv_sec))
last_tv_sec, timeval_from_us(rtpe_now).tv_sec))
{
// new second - zero out stats. slight race condition here
atomic64_set(&h->stats_entry->packets_input[idx], 0);
@ -6245,9 +6245,9 @@ void codec_timer_callback(call_t *c, void (*func)(call_t *, codec_timer_callback
cb->timer_callback_func = func;
cb->arg = a;
cb->ct.timer_func = __codec_timer_callback_fire;
cb->ct.next = rtpe_now;
timeval_add_usec(&cb->ct.next, delay);
timerthread_obj_schedule_abs(&cb->ct.tt_obj, &cb->ct.next);
cb->ct.next = timeval_from_us(rtpe_now);
cb->ct.next = timeval_add_usec(cb->ct.next, delay);
timerthread_obj_schedule_abs(&cb->ct.tt_obj, cb->ct.next);
}
static void codec_timers_run(void *p) {
@ -6329,7 +6329,7 @@ static void codec_worker(void *d) {
mutex_unlock(&transcode_lock);
gettimeofday(&rtpe_now, NULL);
rtpe_now = now_us();
transcode_job_do(j);
mutex_lock(&transcode_lock);

@ -867,12 +867,12 @@ static void control_ng_process_payload(ng_ctx *hctx, str *reply, str *data, cons
// stop command timer
gettimeofday(&cmd_stop, NULL);
//print command duration
cmd_process_time = timeval_subtract(&cmd_stop, &cmd_start);
cmd_process_time = timeval_subtract(cmd_stop, cmd_start);
if (command_ctx.opmode >= 0 && command_ctx.opmode < OP_COUNT) {
mutex_lock(&cur->cmd[command_ctx.opmode].lock);
cur->cmd[command_ctx.opmode].count++;
cur->cmd[command_ctx.opmode].time = timeval_add(&cur->cmd[command_ctx.opmode].time, &cmd_process_time);
cur->cmd[command_ctx.opmode].time = timeval_add(cur->cmd[command_ctx.opmode].time, cmd_process_time);
mutex_unlock(&cur->cmd[command_ctx.opmode].lock);
}
@ -883,7 +883,7 @@ static void control_ng_process_payload(ng_ctx *hctx, str *reply, str *data, cons
// update interval statistics
RTPE_STATS_INC(ng_commands[command_ctx.opmode]);
RTPE_STATS_SAMPLE(ng_command_times[command_ctx.opmode], timeval_us(&cmd_process_time));
RTPE_STATS_SAMPLE(ng_command_times[command_ctx.opmode], timeval_us(cmd_process_time));
goto send_resp;

@ -22,20 +22,20 @@ INLINE void cookie_cache_state_cleanup(struct cookie_cache_state *s) {
void cookie_cache_init(struct cookie_cache *c) {
cookie_cache_state_init(&c->current);
cookie_cache_state_init(&c->old);
c->swap_time = rtpe_now.tv_sec;
c->swap_time = timeval_from_us(rtpe_now).tv_sec;
mutex_init(&c->lock);
cond_init(&c->cond);
}
/* lock must be held */
static void __cookie_cache_check_swap(struct cookie_cache *c) {
if (rtpe_now.tv_sec - c->swap_time >= 30) {
if (timeval_from_us(rtpe_now).tv_sec - c->swap_time >= 30) {
g_hash_table_remove_all(c->old.cookies);
bencode_buffer_free(&c->old.buffer);
swap_ptrs(&c->old.cookies, &c->current.cookies);
c->old.buffer = c->current.buffer;
bencode_buffer_init(&c->current.buffer);
c->swap_time = rtpe_now.tv_sec;
c->swap_time = timeval_from_us(rtpe_now).tv_sec;
}
}

@ -406,7 +406,7 @@ static enum thread_looper_action __dtls_timer(void) {
if (!c)
return TLA_BREAK;
left = c->expires - rtpe_now.tv_sec;
left = c->expires - timeval_from_us(rtpe_now).tv_sec;
if (left > CERT_EXPIRY_TIME/2)
goto out;

@ -131,7 +131,7 @@ static void dtmf_bencode_and_notify(struct call_media *media, unsigned int event
bencode_dictionary_add_string(data, "type", "DTMF");
bencode_dictionary_add_string(data, "source_ip", sockaddr_print_buf(&fsin->address));
bencode_dictionary_add_integer(data, "timestamp", rtpe_now.tv_sec);
bencode_dictionary_add_integer(data, "timestamp", timeval_from_us(rtpe_now).tv_sec);
bencode_dictionary_add_integer(data, "event", event);
bencode_dictionary_add_integer(data, "duration", ((long long) duration * (1000000LL / clockrate)) / 1000LL);
bencode_dictionary_add_integer(data, "volume", volume);
@ -177,7 +177,7 @@ static GString *dtmf_json_print(struct call_media *media, unsigned int event, un
g_string_append_printf(buf, "],"
"\"type\":\"DTMF\",\"timestamp\":%lu,\"source_ip\":\"%s\","
"\"event\":%u,\"duration\":%u,\"volume\":%u}",
(unsigned long) rtpe_now.tv_sec,
(unsigned long) timeval_from_us(rtpe_now).tv_sec,
sockaddr_print_buf(&fsin->address),
(unsigned int) event,
(duration * (1000000 / clockrate)) / 1000,
@ -753,7 +753,7 @@ static const char *dtmf_inject_pcm(struct call_media *media, struct call_media *
.ssrc = htonl(ssrc_in->parent->h.ssrc),
};
struct media_packet packet = {
.tv = rtpe_now,
.tv = timeval_from_us(rtpe_now),
.call = call,
.media = media,
.media_out = sink,

@ -89,8 +89,8 @@ static int connect_to_graphite_server(const endpoint_t *graphite_ep) {
GString *print_graphite_data(void) {
long long time_diff_us = timeval_diff(&rtpe_now, &rtpe_latest_graphite_interval_start);
rtpe_latest_graphite_interval_start = rtpe_now;
int64_t time_diff_us = timeval_diff(timeval_from_us(rtpe_now), rtpe_latest_graphite_interval_start);
rtpe_latest_graphite_interval_start = timeval_from_us(rtpe_now);
stats_counters_calc_diff(rtpe_stats, &rtpe_stats_graphite_intv, &rtpe_stats_graphite_diff);
stats_rate_min_max_avg_sample(&rtpe_rate_graphite_min_max, &rtpe_rate_graphite_min_max_avg_sampled,
@ -109,7 +109,7 @@ GString *print_graphite_data(void) {
#define GPF(fmt, ...) \
if (graphite_prefix) \
g_string_append(graph_str, graphite_prefix); \
g_string_append_printf(graph_str, fmt " %llu\n", ##__VA_ARGS__, (unsigned long long)rtpe_now.tv_sec)
g_string_append_printf(graph_str, fmt " %llu\n", ##__VA_ARGS__, (unsigned long long)timeval_from_us(rtpe_now).tv_sec)
for (int i = 0; i < OP_COUNT; i++) {
GPF("%s_time_min %.6f", ng_command_strings_esc[i],
@ -188,7 +188,7 @@ GString *print_graphite_data(void) {
mutex_lock(&rtpe_codec_stats_lock);
int last_tv_sec = rtpe_now.tv_sec - 1;
int last_tv_sec = timeval_from_us(rtpe_now).tv_sec - 1;
unsigned int idx = last_tv_sec & 1;
codec_stats_ht_iter iter;
@ -215,7 +215,7 @@ GString *print_graphite_data(void) {
(unsigned long long) atomic64_get_na(&rtpe_gauge_graphite_min_max_sampled.min.total_sessions),
(unsigned long long) atomic64_get_na(&rtpe_gauge_graphite_min_max_sampled.max.total_sessions),
(double) atomic64_get_na(&rtpe_stats_graphite_diff.total_calls_duration_intv) / 1000000.0,
(unsigned long long ) rtpe_now.tv_sec);
(unsigned long long ) timeval_from_us(rtpe_now).tv_sec);
return graph_str;
}
@ -304,13 +304,13 @@ static void graphite_loop_run(endpoint_t *graphite_ep, int seconds) {
}
}
gettimeofday(&rtpe_now, NULL);
if (rtpe_now.tv_sec < next_run) {
rtpe_now = now_us();
if (timeval_from_us(rtpe_now).tv_sec < next_run) {
usleep(100000);
return;
}
next_run = rtpe_now.tv_sec + seconds;
next_run = timeval_from_us(rtpe_now).tv_sec + seconds;
if (graphite_sock.fd < 0 && connection_state == STATE_DISCONNECTED) {
connect_to_graphite_server(graphite_ep);
@ -319,7 +319,7 @@ static void graphite_loop_run(endpoint_t *graphite_ep, int seconds) {
if (graphite_sock.fd >= 0 && connection_state == STATE_CONNECTED) {
add_total_calls_duration_in_interval(&graphite_interval_tv);
gettimeofday(&rtpe_now, NULL);
rtpe_now = now_us();
rc = send_graphite_data();
if (rc < 0) {
ilog(LOG_ERROR,"Sending graphite data failed.");

@ -37,7 +37,7 @@ struct scheduler {
struct looper_thread {
enum thread_looper_action (*f)(void);
const char *name;
long long interval_us;
int64_t interval_us;
};
@ -288,19 +288,19 @@ static void thread_looper_helper(void *fp) {
struct looper_thread lh = *lhp;
g_free(lhp);
long long interval_us = lh.interval_us;
int64_t interval_us = lh.interval_us;
#ifdef ASAN_BUILD
interval_us = MIN(interval_us, 100000);
#endif
static const long long warn_limit_pct = 20; // 20%
long long warn_limit_us = interval_us * warn_limit_pct / 100;
static const int64_t warn_limit_pct = 20; // 20%
int64_t warn_limit_us = interval_us * warn_limit_pct / 100;
struct timespec interval_ts = {
.tv_sec = interval_us / 1000000,
.tv_nsec = (interval_us % 1000000) * 1000,
};
while (!rtpe_shutdown) {
gettimeofday(&rtpe_now, NULL);
rtpe_now = now_us();
enum thread_looper_action ret = lh.f();
@ -311,10 +311,10 @@ static void thread_looper_helper(void *fp) {
struct timeval stop;
gettimeofday(&stop, NULL);
long long duration_us = timeval_diff(&stop, &rtpe_now);
int64_t duration_us = timeval_diff(stop, timeval_from_us(rtpe_now));
if (duration_us > warn_limit_us)
ilog(LOG_WARN, "Run time of timer \"%s\": %lli.%06lli sec, "
"exceeding limit of %lli%% (%lli.%06lli sec)",
ilog(LOG_WARN, "Run time of timer \"%s\": %" PRId64" .%06" PRId64" sec, "
"exceeding limit of %" PRId64" %% (%" PRId64" .%06" PRId64" sec)",
lh.name,
duration_us / 1000000, duration_us % 1000000,
warn_limit_pct,
@ -337,7 +337,7 @@ static void thread_looper_helper(void *fp) {
void thread_create_looper(enum thread_looper_action (*f)(void), const char *scheduler, int priority,
const char *name,
long long interval_us)
int64_t interval_us)
{
struct looper_thread *lh = g_new(__typeof(*lh), 1);
*lh = (__typeof__(*lh)) {

@ -132,7 +132,7 @@ static void queue_sdp_fragment(ng_buffer *ngbuf, call_t *call, str *key, sdp_str
STR_FMT_M(&flags->call_id), STR_FMT_M(&flags->from_tag));
struct sdp_fragment *frag = g_new0(__typeof(*frag), 1);
frag->received = rtpe_now;
frag->received = timeval_from_us(rtpe_now);
frag->ngbuf = obj_get(ngbuf);
if (streams) {
frag->streams = *streams;
@ -174,7 +174,7 @@ void dequeue_sdp_fragments(struct call_monologue *monologue) {
struct sdp_fragment *frag;
while ((frag = t_queue_pop_head(frags))) {
if (timeval_diff(&rtpe_now, &frag->received) > MAX_FRAG_AGE)
if (timeval_diff(timeval_from_us(rtpe_now), frag->received) > MAX_FRAG_AGE)
goto next;
ilog(LOG_DEBUG, "Dequeuing SDP fragment for " STR_FORMAT_M "/" STR_FORMAT_M,
@ -194,7 +194,7 @@ static gboolean fragment_check_cleanup(str *key, fragment_q *frags, void *p) {
return TRUE;
while (frags->length) {
struct sdp_fragment *frag = frags->head->data;
if (!all && timeval_diff(&rtpe_now, &frag->received) <= MAX_FRAG_AGE)
if (!all && timeval_diff(timeval_from_us(rtpe_now), frag->received) <= MAX_FRAG_AGE)
break;
t_queue_pop_head(frags);
fragment_free(frag);
@ -378,7 +378,7 @@ static void __ice_agent_initialize(struct ice_agent *ag) {
create_random_ice_string(call, &ag->ufrag[1], 8);
create_random_ice_string(call, &ag->pwd[1], 26);
atomic64_set_na(&ag->last_activity, rtpe_now.tv_sec);
atomic64_set_na(&ag->last_activity, timeval_from_us(rtpe_now).tv_sec);
}
static struct ice_agent *__ice_agent_new(struct call_media *media) {
@ -460,7 +460,7 @@ void ice_update(struct ice_agent *ag, struct stream_params *sp, bool allow_reset
log_info_ice_agent(ag);
atomic64_set_na(&ag->last_activity, rtpe_now.tv_sec);
atomic64_set_na(&ag->last_activity, timeval_from_us(rtpe_now).tv_sec);
media = ag->media;
call = media->call;
@ -666,8 +666,8 @@ static void __ice_agent_free(struct ice_agent *ag) {
static void __agent_schedule(struct ice_agent *ag, unsigned long usec) {
struct timeval nxt;
nxt = rtpe_now;
timeval_add_usec(&nxt, usec);
nxt = timeval_from_us(rtpe_now);
nxt = timeval_add_usec(nxt, usec);
__agent_schedule_abs(ag, &nxt);
}
static void __agent_schedule_abs(struct ice_agent *ag, const struct timeval *tv) {
@ -684,11 +684,11 @@ static void __agent_schedule_abs(struct ice_agent *ag, const struct timeval *tv)
mutex_lock(&tt->lock);
if (ag->tt_obj.last_run.tv_sec) {
/* make sure we don't run more often than we should */
diff = timeval_diff(&nxt, &ag->tt_obj.last_run);
diff = timeval_diff(nxt, ag->tt_obj.last_run);
if (diff < TIMER_RUN_INTERVAL * 1000)
timeval_add_usec(&nxt, TIMER_RUN_INTERVAL * 1000 - diff);
nxt = timeval_add_usec(nxt, TIMER_RUN_INTERVAL * 1000 - diff);
}
timerthread_obj_schedule_abs_nl(&ag->tt_obj, &nxt);
timerthread_obj_schedule_abs_nl(&ag->tt_obj, nxt);
mutex_unlock(&tt->lock);
}
static void __agent_deschedule(struct ice_agent *ag) {
@ -731,7 +731,7 @@ static void __do_ice_check(struct ice_candidate_pair *pair) {
mutex_lock(&ag->lock);
pair->retransmit = rtpe_now;
pair->retransmit = timeval_from_us(rtpe_now);
if (!PAIR_SET(pair, IN_PROGRESS)) {
PAIR_CLEAR2(pair, FROZEN, FAILED);
pair->retransmit_ms = STUN_RETRANSMIT_INTERVAL;
@ -746,7 +746,7 @@ static void __do_ice_check(struct ice_candidate_pair *pair) {
pair->retransmit_ms *= 2;
pair->retransmits++;
}
timeval_add_usec(&pair->retransmit, pair->retransmit_ms * 1000);
pair->retransmit = timeval_add_usec(pair->retransmit, pair->retransmit_ms * 1000);
__agent_schedule_abs(pair->agent, &pair->retransmit);
memcpy(transact, pair->stun_transaction, sizeof(transact));
@ -828,7 +828,7 @@ static void __do_ice_checks(struct ice_agent *ag) {
if (!ag->pwd[0].s)
return;
atomic64_set_na(&ag->last_activity, rtpe_now.tv_sec);
atomic64_set_na(&ag->last_activity, timeval_from_us(rtpe_now).tv_sec);
__DBG("running checks, call "STR_FORMAT" tag "STR_FORMAT"", STR_FMT(&ag->call->callid),
STR_FMT(&ag->media->monologue->tag));
@ -837,9 +837,9 @@ static void __do_ice_checks(struct ice_agent *ag) {
/* check if we're done and should start nominating pairs */
if (AGENT_ISSET(ag, CONTROLLING) && !AGENT_ISSET(ag, NOMINATING) && ag->start_nominating.tv_sec) {
if (timeval_cmp(&rtpe_now, &ag->start_nominating) >= 0)
if (timeval_cmp(timeval_from_us(rtpe_now), ag->start_nominating) >= 0)
__nominate_pairs(ag);
timeval_lowest(&next_run, &ag->start_nominating);
next_run = timeval_lowest(next_run, ag->start_nominating);
}
/* triggered checks are preferred */
@ -847,7 +847,7 @@ static void __do_ice_checks(struct ice_agent *ag) {
if (pair) {
__DBG("running triggered check on " PAIR_FORMAT, PAIR_FMT(pair));
PAIR_CLEAR(pair, TRIGGERED);
next_run = rtpe_now;
next_run = timeval_from_us(rtpe_now);
goto check;
}
@ -874,11 +874,11 @@ static void __do_ice_checks(struct ice_agent *ag) {
if (valid && valid->pair_priority > pair->pair_priority)
continue;
if (timeval_cmp(&pair->retransmit, &rtpe_now) <= 0)
if (timeval_cmp(pair->retransmit, timeval_from_us(rtpe_now)) <= 0)
g_queue_push_tail(&retransmits, pair); /* can't run check directly
due to locks */
else
timeval_lowest(&next_run, &pair->retransmit);
next_run = timeval_lowest(next_run, pair->retransmit);
continue;
}
@ -1241,7 +1241,7 @@ int ice_request(stream_fd *sfd, const endpoint_t *src,
if (!ag)
return -1;
atomic64_set_na(&ag->last_activity, rtpe_now.tv_sec);
atomic64_set_na(&ag->last_activity, timeval_from_us(rtpe_now).tv_sec);
/* determine candidate pair */
{
@ -1350,7 +1350,7 @@ int ice_response(stream_fd *sfd, const endpoint_t *src,
if (!ag)
return -1;
atomic64_set_na(&ag->last_activity, rtpe_now.tv_sec);
atomic64_set_na(&ag->last_activity, timeval_from_us(rtpe_now).tv_sec);
{
LOCK(&ag->lock);
@ -1414,8 +1414,8 @@ int ice_response(stream_fd *sfd, const endpoint_t *src,
if (!ag->start_nominating.tv_sec) {
if (__check_succeeded_complete(ag)) {
ag->start_nominating = rtpe_now;
timeval_add_usec(&ag->start_nominating, 100000);
ag->start_nominating = timeval_from_us(rtpe_now);
ag->start_nominating = timeval_add_usec(ag->start_nominating, 100000);
__agent_schedule_abs(ag, &ag->start_nominating);
}
}

@ -86,7 +86,7 @@ static struct janus_session *janus_get_session(uint64_t id) {
if (!ret)
return NULL;
mutex_lock(&ret->lock);
ret->last_act = rtpe_now.tv_sec;
ret->last_act = timeval_from_us(rtpe_now).tv_sec;
mutex_unlock(&ret->lock);
return ret;
}
@ -1151,7 +1151,7 @@ static const char *janus_add_token(JsonReader *reader, JsonBuilder *builder, boo
return "JSON object does not contain 'token' key";
time_t *now = g_malloc(sizeof(*now));
*now = rtpe_now.tv_sec;
*now = timeval_from_us(rtpe_now).tv_sec;
mutex_lock(&janus_lock);
t_hash_table_replace(janus_tokens, g_strdup(token), now);
mutex_unlock(&janus_lock);
@ -1180,7 +1180,7 @@ static const char *janus_create(JsonReader *reader, JsonBuilder *builder, struct
__auto_type session = obj_alloc0(struct janus_session, __janus_session_free);
mutex_init(&session->lock);
mutex_lock(&session->lock); // not really necessary but Coverity complains
session->last_act = rtpe_now.tv_sec;
session->last_act = timeval_from_us(rtpe_now).tv_sec;
session->websockets = janus_websockets_ht_new();
session->handles = janus_handles_set_new();

@ -56,7 +56,7 @@ static void reset_jitter_buffer(struct jitter_buffer *jb) {
jb->clock_rate = 0;
jb->payload_type = 0;
jb->clock_drift_val = 0;
jb->prev_seq_ts = rtpe_now;
jb->prev_seq_ts = timeval_from_us(rtpe_now);
jb->prev_seq = 0;
jb->num_resets++;
@ -156,31 +156,31 @@ static int queue_packet(struct media_packet *mp, struct jb_packet *p) {
}
p->ttq_entry.when = jb->first_send;
long long ts_diff_us =
(long long) (ts_diff + (jb->rtptime_delta * jb->buffer_len))* 1000000 / clockrate;
int64_t ts_diff_us =
(ts_diff + (jb->rtptime_delta * jb->buffer_len))* 1000000 / clockrate;
ts_diff_us += ((long long) jb->clock_drift_val * seq_diff);
ts_diff_us += ((long long) jb->dtmf_mult_factor * DELAY_FACTOR);
ts_diff_us += jb->clock_drift_val * seq_diff;
ts_diff_us += jb->dtmf_mult_factor * DELAY_FACTOR;
timeval_add_usec(&p->ttq_entry.when, ts_diff_us);
p->ttq_entry.when = timeval_add_usec(p->ttq_entry.when, ts_diff_us);
ts_diff_us = timeval_diff(&p->ttq_entry.when, &rtpe_now);
ts_diff_us = timeval_diff(p->ttq_entry.when, timeval_from_us(rtpe_now));
if (ts_diff_us > 1000000) { // more than one second, can't be right
ilog(LOG_DEBUG, "Partial reset due to timestamp");
jb->first_send.tv_sec = 0;
p->ttq_entry.when = rtpe_now;
p->ttq_entry.when = timeval_from_us(rtpe_now);
}
if(jb->prev_seq_ts.tv_sec == 0)
jb->prev_seq_ts = rtpe_now;
jb->prev_seq_ts = timeval_from_us(rtpe_now);
if((timeval_diff(&p->ttq_entry.when, &jb->prev_seq_ts) < 0) && (curr_seq > jb->prev_seq)) {
if((timeval_diff(p->ttq_entry.when, jb->prev_seq_ts) < 0) && (curr_seq > jb->prev_seq)) {
p->ttq_entry.when = jb->prev_seq_ts;
timeval_add_usec(&p->ttq_entry.when, DELAY_FACTOR);
p->ttq_entry.when = timeval_add_usec(p->ttq_entry.when, DELAY_FACTOR);
}
if(timeval_diff(&p->ttq_entry.when, &jb->prev_seq_ts) > 0) {
if(timeval_diff(p->ttq_entry.when, jb->prev_seq_ts) > 0) {
jb->prev_seq_ts = p->ttq_entry.when;
jb->prev_seq = curr_seq;
}
@ -201,18 +201,18 @@ static int handle_clock_drift(struct media_packet *mp) {
if(((seq_diff % CLOCK_DRIFT_MULT) != 0) || !seq_diff)
return 0;
unsigned long ts = ntohl(mp->rtp->timestamp);
uint32_t ts = ntohl(mp->rtp->timestamp);
int payload_type = (mp->rtp->m_pt & 0x7f);
int clockrate = get_clock_rate(mp, payload_type);
if(!clockrate) {
return 0;
}
long ts_diff = (uint32_t) ts - (uint32_t) jb->first_send_ts;
long long ts_diff_us =
(long long) (ts_diff)* 1000000 / clockrate;
int64_t ts_diff = (uint32_t) ts - (uint32_t) jb->first_send_ts;
int64_t ts_diff_us =
ts_diff* 1000000 / clockrate;
struct timeval to_send = jb->first_send;
timeval_add_usec(&to_send, ts_diff_us);
long long time_diff = timeval_diff(&rtpe_now, &to_send);
to_send = timeval_add_usec(to_send, ts_diff_us);
int64_t time_diff = timeval_diff(timeval_from_us(rtpe_now), to_send);
jb->clock_drift_val = time_diff/seq_diff;
if(jb->clock_drift_val < -10000 || jb->clock_drift_val > 10000) { //disable jb if clock drift greater than 10 ms
@ -307,7 +307,7 @@ int buffer_packet(struct media_packet *mp, const str *s) {
}
goto end_unlock;
}
p->ttq_entry.when = jb->first_send = rtpe_now;
p->ttq_entry.when = jb->first_send = timeval_from_us(rtpe_now);
jb->first_send_ts = ts;
jb->first_seq = ntohs(mp->rtp->seq_num);
jb->ssrc = ntohl(mp->rtp->ssrc);

@ -1522,7 +1522,7 @@ fallback:
static void init_everything(charp_ht templates) {
bufferpool_init();
gettimeofday(&rtpe_now, NULL);
rtpe_now = now_us();
log_init(rtpe_common_config_ptr->log_name);
log_format(rtpe_config.log_format);
recording_fs_init(rtpe_config.spooldir, rtpe_config.rec_method, rtpe_config.rec_format);
@ -1568,7 +1568,7 @@ static void init_everything(charp_ht templates) {
static void create_everything(void) {
struct timeval tmp_tv;
gettimeofday(&rtpe_now, NULL);
rtpe_now = now_us();
// either one global poller, or one per thread for media sockets plus one for control sockets
@ -1755,7 +1755,7 @@ static void do_redis_restore(void) {
gettimeofday(&redis_stop, NULL);
// print redis restore duration
redis_diff += timeval_diff(&redis_stop, &redis_start) / 1000.0;
redis_diff += timeval_diff(redis_stop, redis_start) / 1000.0;
ilog(LOG_INFO, "Redis restore time = %.0lf ms", redis_diff);
}
@ -1774,7 +1774,7 @@ static void uring_poller_loop(void *ptr) {
thread_waker_add_generic(&wk);
while (!rtpe_shutdown) {
gettimeofday(&rtpe_now, NULL);
rtpe_now = now_us();
uring_poller_poll(p);
append_thread_lpr_to_glob_lpr();
log_info_reset();

@ -312,8 +312,8 @@ static void send_timer_rtcp(struct send_timer *st, struct ssrc_ctx *ssrc_out) {
rtcp_send_report(media, ssrc_out);
ssrc_out->next_rtcp = rtpe_now;
timeval_add_usec(&ssrc_out->next_rtcp, 5000000 + (ssl_random() % 2000000));
ssrc_out->next_rtcp = timeval_from_us(rtpe_now);
ssrc_out->next_rtcp = timeval_add_usec(ssrc_out->next_rtcp, 5000000 + (ssl_random() % 2000000));
}
struct async_send_req {
@ -345,7 +345,7 @@ static bool __send_timer_send_1(struct rtp_header *rh, struct packet_stream *sin
ntohs(rh->seq_num),
ntohl(rh->timestamp),
ntohl(rh->ssrc));
codec_calc_jitter(cp->ssrc_out, ntohl(rh->timestamp), cp->clockrate, &rtpe_now);
codec_calc_jitter(cp->ssrc_out, ntohl(rh->timestamp), cp->clockrate, timeval_from_us(rtpe_now));
}
else
ilog(LOG_DEBUG, "Forward to sink endpoint: local %s -> remote %s%s%s",
@ -406,7 +406,7 @@ static void __send_timer_send_common(struct send_timer *st, struct codec_packet
struct ssrc_ctx *ssrc_out = cp->ssrc_out;
if (ssrc_out && ssrc_out->next_rtcp.tv_sec) {
mutex_lock(&ssrc_out->parent->h.lock);
long long diff = timeval_diff(&ssrc_out->next_rtcp, &rtpe_now);
int64_t diff = timeval_diff(ssrc_out->next_rtcp, timeval_from_us(rtpe_now));
mutex_unlock(&ssrc_out->parent->h.lock);
if (diff < 0)
send_timer_rtcp(st, ssrc_out);
@ -470,17 +470,17 @@ typedef union {
static void media_player_coder_add_packet(struct media_player_coder *c,
void (*fn)(media_player_coder_add_packet_arg p, char *buf, size_t len,
long long us_dur, unsigned long long pts), media_player_coder_add_packet_arg p) {
int64_t us_dur, unsigned long long pts), media_player_coder_add_packet_arg p) {
// scale pts and duration according to sample rate
long long duration_scaled = c->pkt->duration * c->avstream->CODECPAR->sample_rate
int64_t duration_scaled = c->pkt->duration * c->avstream->CODECPAR->sample_rate
* c->avstream->time_base.num / c->avstream->time_base.den;
unsigned long long pts_scaled = c->pkt->pts * c->avstream->CODECPAR->sample_rate
* c->avstream->time_base.num / c->avstream->time_base.den;
long long us_dur = c->pkt->duration * 1000000LL * c->avstream->time_base.num
int64_t us_dur = c->pkt->duration * 1000000LL * c->avstream->time_base.num
/ c->avstream->time_base.den;
ilog(LOG_DEBUG, "read media packet: pts %llu duration %lli (scaled %llu/%lli, %lli us), "
ilog(LOG_DEBUG, "read media packet: pts %llu duration %lli (scaled %llu/%" PRId64 ", %" PRId64 " us), "
"sample rate %i, time_base %i/%i",
(unsigned long long) c->pkt->pts,
(long long) c->pkt->duration,
@ -554,7 +554,7 @@ retry:;
memcpy(buf, pkt->buf, len);
struct media_packet packet = {
.tv = rtpe_now,
.tv = timeval_from_us(rtpe_now),
.call = mp->call,
.media = mp->media,
.media_out = mp->media,
@ -568,7 +568,7 @@ retry:;
read_idx == 0, mp->seq++, 0, -1, 0);
mp->buffer_ts += pkt->duration_ts;
mp->sync_ts_tv = rtpe_now;
mp->sync_ts_tv = timeval_from_us(rtpe_now);
media_packet_encrypt(mp->crypt_handler->out->rtp_crypt, mp->sink, &packet);
@ -578,8 +578,8 @@ retry:;
mutex_unlock(&mp->sink->out_lock);
// schedule our next run
timeval_add_usec(&mp->next_run, us_dur);
timerthread_obj_schedule_abs(&mp->tt_obj, &mp->next_run);
mp->next_run = timeval_add_usec(mp->next_run, us_dur);
timerthread_obj_schedule_abs(&mp->tt_obj, mp->next_run);
return false;
}
@ -655,17 +655,17 @@ static void media_player_cached_reader_start(struct media_player *mp, str_case_v
mp->coder.handler = codec_handler_make_dummy(&entry->coder.handler->dest_pt, mp->media, codec_set);
mp->run_func = media_player_read_decoded_packet;
mp->next_run = rtpe_now;
mp->next_run = timeval_from_us(rtpe_now);
mp->coder.duration = entry->coder.duration;
// if we played anything before, scale our sync TS according to the time
// that has passed
if (mp->sync_ts_tv.tv_sec) {
long long ts_diff_us = timeval_diff(&rtpe_now, &mp->sync_ts_tv);
int64_t ts_diff_us = timeval_diff(timeval_from_us(rtpe_now), mp->sync_ts_tv);
mp->buffer_ts += fraction_divl(ts_diff_us * dst_pt->clock_rate / 1000000, &dst_pt->codec_def->default_clockrate_fact);
}
mp->sync_ts_tv = rtpe_now;
mp->sync_ts_tv = timeval_from_us(rtpe_now);
media_player_read_decoded_packet(mp);
}
@ -755,7 +755,7 @@ static bool media_player_cache_get_entry(struct media_player *mp,
}
static void media_player_cache_packet(struct media_player_cache_entry *entry, char *buf, size_t len,
long long us_dur, unsigned long long pts)
int64_t us_dur, unsigned long long pts)
{
// synthesise fake RTP header and media_packet context
@ -939,7 +939,7 @@ static int media_player_setup_common(struct media_player *mp, const rtp_payload_
// if we played anything before, scale our sync TS according to the time
// that has passed
if (mp->sync_ts_tv.tv_sec) {
long long ts_diff_us = timeval_diff(&rtpe_now, &mp->sync_ts_tv);
int64_t ts_diff_us = timeval_diff(timeval_from_us(rtpe_now), mp->sync_ts_tv);
mp->sync_ts += fraction_divl(ts_diff_us * (*dst_pt)->clock_rate / 1000000, &(*dst_pt)->codec_def->default_clockrate_fact);
}
@ -1016,7 +1016,7 @@ static int __ensure_codec_handler(struct media_player *mp, const rtp_payload_typ
// appropriate lock must be held
void media_player_add_packet(struct media_player *mp, char *buf, size_t len,
long long us_dur, unsigned long long pts)
int64_t us_dur, unsigned long long pts)
{
// synthesise fake RTP header and media_packet context
@ -1025,7 +1025,7 @@ void media_player_add_packet(struct media_player *mp, char *buf, size_t len,
.seq_num = htons(mp->seq),
};
struct media_packet packet = {
.tv = rtpe_now,
.tv = timeval_from_us(rtpe_now),
.call = mp->call,
.media = mp->media,
.media_out = mp->media,
@ -1039,7 +1039,7 @@ void media_player_add_packet(struct media_player *mp, char *buf, size_t len,
// as this is timing sensitive and we may have spent some time decoding,
// update our global "now" timestamp
gettimeofday(&rtpe_now, NULL);
rtpe_now = now_us();
// keep track of RTP timestamps and real clock. look at the last packet we received
// and update our sync TS.
@ -1058,8 +1058,8 @@ void media_player_add_packet(struct media_player *mp, char *buf, size_t len,
ilog(LOG_ERR, "Error sending playback media to RTP sink");
mutex_unlock(&mp->sink->out_lock);
timeval_add_usec(&mp->next_run, us_dur);
timerthread_obj_schedule_abs(&mp->tt_obj, &mp->next_run);
mp->next_run = timeval_add_usec(mp->next_run, us_dur);
timerthread_obj_schedule_abs(&mp->tt_obj, mp->next_run);
}
static int media_player_find_file_begin(struct media_player *mp) {
@ -1144,7 +1144,7 @@ void media_player_set_media(struct media_player *mp, struct call_media *media) {
}
if (!mp->ssrc_out || mp->ssrc_out->parent->h.ssrc != mp->ssrc) {
struct ssrc_ctx *ssrc_ctx = get_ssrc_ctx(mp->ssrc, &media->ssrc_hash, SSRC_DIR_OUTPUT);
ssrc_ctx->next_rtcp = rtpe_now;
ssrc_ctx->next_rtcp = timeval_from_us(rtpe_now);
mp->ssrc_out = ssrc_ctx;
}
}
@ -1207,9 +1207,9 @@ static bool media_player_play_start(struct media_player *mp, const rtp_payload_t
if (media_player_cache_entry_init(mp, dst_pt, codec_set))
return true;
mp->next_run = rtpe_now;
mp->next_run = timeval_from_us(rtpe_now);
// give ourselves a bit of a head start with decoding
timeval_add_usec(&mp->next_run, -50000);
mp->next_run = timeval_add_usec(mp->next_run, -50000);
// if start_pos is positive, try to seek to that position
if (mp->opts.start_pos > 0) {
@ -1253,7 +1253,7 @@ static struct media_player_media_file *media_player_media_file_new(str blob) {
fo->blob = blob;
fo->blob.dup = call_ref; // string is allocated by reference on `fo`
RTPE_GAUGE_ADD(media_cache, blob.len);
fo->atime = fo->mtime = rtpe_now.tv_sec;
fo->atime = fo->mtime = timeval_from_us(rtpe_now).tv_sec;
return fo;
}
@ -1302,7 +1302,7 @@ static struct media_player_media_file *media_player_media_files_get_only(const s
return NULL;
obj_hold(fo);
fo->atime = rtpe_now.tv_sec;
fo->atime = timeval_from_us(rtpe_now).tv_sec;
}
return fo;
@ -1321,7 +1321,7 @@ static struct media_player_media_file *media_player_db_id_get_only(unsigned long
return NULL;
obj_hold(fo);
fo->atime = rtpe_now.tv_sec;
fo->atime = timeval_from_us(rtpe_now).tv_sec;
}
return fo;
@ -2764,7 +2764,7 @@ static void media_player_expire_files(void) {
if (rtpe_config.media_expire <= 0)
return;
time_t limit = rtpe_now.tv_sec - rtpe_config.media_expire;
time_t limit = timeval_from_us(rtpe_now).tv_sec - rtpe_config.media_expire;
unsigned int num = 0;
{
@ -2795,7 +2795,7 @@ static void media_player_expire_dbs(void) {
if (rtpe_config.db_expire <= 0)
return;
time_t limit = rtpe_now.tv_sec - rtpe_config.db_expire;
time_t limit = timeval_from_us(rtpe_now).tv_sec - rtpe_config.db_expire;
unsigned int num = 0;
{
@ -2826,7 +2826,7 @@ static void media_player_expire_cache_entry(unsigned long long id, unsigned int
time_t mtime, atime;
if (!media_player_get_cache_times(id, &mtime, &atime))
return;
time_t limit = rtpe_now.tv_sec - rtpe_config.db_expire;
time_t limit = timeval_from_us(rtpe_now).tv_sec - rtpe_config.db_expire;
if (atime >= limit)
return;
if (media_player_evict_cache(id))

@ -1878,7 +1878,7 @@ void kernelize(struct packet_stream *stream) {
g_free(redi);
}
stream->kernel_time = rtpe_now.tv_sec;
stream->kernel_time = timeval_from_us(rtpe_now).tv_sec;
PS_SET(stream, KERNELIZED);
return;
@ -1886,7 +1886,7 @@ no_kernel_warn:
ilog(LOG_WARNING, "No support for kernel packet forwarding available (%s)", nk_warn_msg);
no_kernel:
PS_SET(stream, KERNELIZED);
stream->kernel_time = rtpe_now.tv_sec;
stream->kernel_time = timeval_from_us(rtpe_now).tv_sec;
PS_SET(stream, NO_KERNEL_SUPPORT);
}
@ -2508,7 +2508,7 @@ static bool media_packet_address_check(struct packet_handler_ctx *phc)
/* wait at least 3 seconds after last signal before committing to a particular
* endpoint address */
bool wait_time = false;
if (!phc->mp.call->last_signal || rtpe_now.tv_sec <= phc->mp.call->last_signal + 3)
if (!phc->mp.call->last_signal || timeval_from_us(rtpe_now).tv_sec <= phc->mp.call->last_signal + 3)
wait_time = true;
const struct endpoint *use_endpoint_confirm = &phc->mp.fsin;
@ -2521,9 +2521,9 @@ static bool media_packet_address_check(struct packet_handler_ctx *phc)
&& phc->mp.stream->advertised_endpoint.port)
{
// check if we need to reset our learned endpoints
if (memcmp(&rtpe_now, &phc->mp.stream->ep_detect_signal, sizeof(rtpe_now))) {
if (timeval_diff(timeval_from_us(rtpe_now), phc->mp.stream->ep_detect_signal) != 0) {
memset(&phc->mp.stream->detected_endpoints, 0, sizeof(phc->mp.stream->detected_endpoints));
phc->mp.stream->ep_detect_signal = rtpe_now;
phc->mp.stream->ep_detect_signal = timeval_from_us(rtpe_now);
}
// possible endpoints that can be detected in order of preference:
@ -2856,7 +2856,7 @@ static int stream_packet(struct packet_handler_ctx *phc) {
atomic64_add_na(&phc->mp.stream->stats_in->bytes, phc->s.len);
atomic64_inc_na(&phc->mp.sfd->local_intf->stats->in.packets);
atomic64_add_na(&phc->mp.sfd->local_intf->stats->in.bytes, phc->s.len);
atomic64_set(&phc->mp.stream->last_packet, rtpe_now.tv_sec);
atomic64_set(&phc->mp.stream->last_packet, timeval_from_us(rtpe_now).tv_sec);
RTPE_STATS_INC(packets_user);
RTPE_STATS_ADD(bytes_user, phc->s.len);
@ -3408,7 +3408,7 @@ void interface_sampled_rate_stats_destroy(struct interface_sampled_rate_stats *s
g_hash_table_destroy(s->ht);
}
struct interface_stats_block *interface_sampled_rate_stats_get(struct interface_sampled_rate_stats *s,
struct local_intf *lif, long long *time_diff_us)
struct local_intf *lif, int64_t *time_diff_us)
{
if (!s)
return NULL;
@ -3418,9 +3418,9 @@ struct interface_stats_block *interface_sampled_rate_stats_get(struct interface_
g_hash_table_insert(s->ht, lif, ret);
}
if (ret->last_run.tv_sec)
*time_diff_us = timeval_diff(&rtpe_now, &ret->last_run);
*time_diff_us = timeval_diff(timeval_from_us(rtpe_now), ret->last_run);
else
*time_diff_us = 0;
ret->last_run = rtpe_now;
ret->last_run = timeval_from_us(rtpe_now);
return &ret->stats;
}

@ -249,7 +249,7 @@ static void mqtt_ssrc_stats(struct ssrc_ctx *ssrc, JsonBuilder *json, struct cal
duplicates = sc->duplicates;
// process per-second stats
uint64_t cur_ts = ssrc_timeval_to_ts(&rtpe_now);
uint64_t cur_ts = ssrc_timeval_to_ts(timeval_from_us(rtpe_now));
uint64_t last_sample;
int64_t sample_packets, sample_octets, sample_packets_lost, sample_duplicates;
@ -274,8 +274,8 @@ static void mqtt_ssrc_stats(struct ssrc_ctx *ssrc, JsonBuilder *json, struct cal
if (last_sample && last_sample != cur_ts) {
// calc sample rates with primitive math
struct timeval last_sample_ts = ssrc_ts_to_timeval(last_sample);
double usecs_diff = (double) timeval_diff(&rtpe_now, &last_sample_ts);
struct timeval last_sample_ts = ssrc_ts_to_timeval(last_sample); // XXX
double usecs_diff = (double) timeval_diff(timeval_from_us(rtpe_now), last_sample_ts); // XXX
// adjust samples
packets -= sample_packets;
@ -533,7 +533,7 @@ INLINE JsonBuilder *__mqtt_timer_intro(void) {
json_builder_begin_object(json);
json_builder_set_member_name(json, "timestamp");
json_builder_add_double_value(json, (double) rtpe_now.tv_sec + (double) rtpe_now.tv_usec / 1000000.0);
json_builder_add_double_value(json, (double) timeval_from_us(rtpe_now).tv_sec + (double) timeval_from_us(rtpe_now).tv_usec / 1000000.0); // XXX
return json;
}

@ -563,7 +563,7 @@ static void rec_pcap_meta_finish_file(call_t *call) {
// Print start timestamp and end timestamp
// YYYY-MM-DDThh:mm:ss
time_t start = call->created.tv_sec;
time_t end = rtpe_now.tv_sec;
time_t end = timeval_from_us(rtpe_now).tv_sec;
char timebuffer[20];
struct tm timeinfo;
struct timeval *terminate;
@ -726,7 +726,7 @@ static void stream_pcap_dump(struct media_packet *mp, const str *s) {
// Set up PCAP packet header
struct pcap_pkthdr header;
ZERO(header);
header.ts = rtpe_now;
header.ts = timeval_from_us(rtpe_now);
header.caplen = pkt_len;
header.len = pkt_len;

@ -801,7 +801,7 @@ void redis_delete_async_loop(void *d) {
return ;
}
r->async_last = rtpe_now.tv_sec;
r->async_last = timeval_from_us(rtpe_now).tv_sec;
// init libevent for pthread usage
if (evthread_use_pthreads() < 0) {
@ -824,7 +824,7 @@ void redis_delete_async_loop(void *d) {
void redis_notify_loop(void *d) {
int seconds = 1, redis_notify_return = 0;
time_t next_run = rtpe_now.tv_sec;
time_t next_run = timeval_from_us(rtpe_now).tv_sec;
struct redis *r;
r = rtpe_redis_notify;
@ -856,13 +856,13 @@ void redis_notify_loop(void *d) {
// loop redis_notify => in case of lost connection
while (!rtpe_shutdown) {
gettimeofday(&rtpe_now, NULL);
if (rtpe_now.tv_sec < next_run) {
rtpe_now = now_us();
if (timeval_from_us(rtpe_now).tv_sec < next_run) {
usleep(100000);
continue;
}
next_run = rtpe_now.tv_sec + seconds;
next_run = timeval_from_us(rtpe_now).tv_sec + seconds;
if (redis_check_conn(r) == REDIS_STATE_CONNECTED || redis_notify_return < 0) {
r->async_ctx = NULL;
@ -957,7 +957,7 @@ static void redis_count_err_and_disable(struct redis *r)
r->consecutive_errors++;
if (r->consecutive_errors > allowed_errors) {
r->restore_tick = rtpe_now.tv_sec + disable_time;
r->restore_tick = timeval_from_us(rtpe_now).tv_sec + disable_time;
ilog(LOG_WARNING, "Redis server %s disabled for %d seconds",
endpoint_print_buf(&r->endpoint),
disable_time);
@ -966,11 +966,11 @@ static void redis_count_err_and_disable(struct redis *r)
/* must be called with r->lock held */
static int redis_check_conn(struct redis *r) {
gettimeofday(&rtpe_now, NULL);
rtpe_now = now_us(); // XXX this needed here?
if ((r->state == REDIS_STATE_DISCONNECTED) && (r->restore_tick > rtpe_now.tv_sec)) {
if ((r->state == REDIS_STATE_DISCONNECTED) && (r->restore_tick > timeval_from_us(rtpe_now).tv_sec)) {
ilog(LOG_WARNING, "Redis server '%s' is disabled. Don't try RE-Establishing for %" TIME_T_INT_FMT " more seconds",
r->hostname, r->restore_tick - rtpe_now.tv_sec);
r->hostname, r->restore_tick - timeval_from_us(rtpe_now).tv_sec);
return REDIS_STATE_DISCONNECTED;
}
@ -2261,7 +2261,7 @@ static void restore_thread(void *call_p, void *ctx_p) {
r = g_queue_pop_head(&ctx->r_q);
mutex_unlock(&ctx->r_m);
gettimeofday(&rtpe_now, NULL);
rtpe_now = now_us();
json_restore_call(r, &callid, ctx->foreign);
mutex_lock(&ctx->r_m);
@ -2456,8 +2456,8 @@ static str redis_encode_json(ng_parser_ctx_t *ctx, call_t *c, void **to_free) {
parser_arg inner = parser->dict_add_dict(root, "json");
{
JSON_SET_SIMPLE("created","%lli", timeval_us(&c->created));
JSON_SET_SIMPLE("destroyed","%lli", timeval_us(&c->destroyed));
JSON_SET_SIMPLE("created","%" PRId64, timeval_us(c->created));
JSON_SET_SIMPLE("destroyed","%" PRId64, timeval_us(c->destroyed));
JSON_SET_SIMPLE("last_signal","%ld", (long int) c->last_signal);
JSON_SET_SIMPLE("tos","%u", (int) c->tos);
JSON_SET_SIMPLE("deleted","%ld", (long int) c->deleted);
@ -2778,7 +2778,7 @@ void redis_update_onekey(call_t *c, struct redis *r) {
if (redis_check_conn(r) == REDIS_STATE_DISCONNECTED)
return;
atomic64_set_na(&c->last_redis_update, rtpe_now.tv_sec);
atomic64_set_na(&c->last_redis_update, timeval_from_us(rtpe_now).tv_sec);
rwlock_lock_r(&c->master_lock);

@ -1266,19 +1266,19 @@ static void logging_destroy(struct rtcp_process_ctx *ctx) {
static void mos_sr(struct rtcp_process_ctx *ctx, struct sender_report_packet *sr) {
ssrc_sender_report(ctx->mp->media, &ctx->scratch.sr, &ctx->mp->tv);
ssrc_sender_report(ctx->mp->media, &ctx->scratch.sr, ctx->mp->tv);
}
static void mos_rr(struct rtcp_process_ctx *ctx, struct report_block *rr) {
ssrc_receiver_report(ctx->mp->media, ctx->mp->sfd, &ctx->scratch.rr, &ctx->mp->tv);
ssrc_receiver_report(ctx->mp->media, ctx->mp->sfd, &ctx->scratch.rr, ctx->mp->tv);
}
static void mos_xr_rr_time(struct rtcp_process_ctx *ctx, const struct xr_rb_rr_time *rr) {
ssrc_receiver_rr_time(ctx->mp->media, &ctx->scratch.xr_rr, &ctx->mp->tv);
ssrc_receiver_rr_time(ctx->mp->media, &ctx->scratch.xr_rr, ctx->mp->tv);
}
static void mos_xr_dlrr(struct rtcp_process_ctx *ctx, const struct xr_rb_dlrr *dlrr) {
ssrc_receiver_dlrr(ctx->mp->media, &ctx->scratch.xr_dlrr, &ctx->mp->tv);
ssrc_receiver_dlrr(ctx->mp->media, &ctx->scratch.xr_dlrr, ctx->mp->tv);
}
static void mos_xr_voip_metrics(struct rtcp_process_ctx *ctx, const struct xr_rb_voip_metrics *rb_voip_mtc) {
ssrc_voip_metrics(ctx->mp->media, &ctx->scratch.xr_vm, &ctx->mp->tv);
ssrc_voip_metrics(ctx->mp->media, &ctx->scratch.xr_vm, ctx->mp->tv);
}
@ -1421,8 +1421,8 @@ static GString *rtcp_sender_report(struct ssrc_sender_report *ssr,
.rtcp.header.version = 2,
.rtcp.header.pt = RTCP_PT_SR,
.rtcp.ssrc = htonl(ssrc),
.ntp_msw = htonl(rtpe_now.tv_sec + 2208988800),
.ntp_lsw = htonl((4294967295ULL * rtpe_now.tv_usec) / 1000000ULL),
.ntp_msw = htonl(timeval_from_us(rtpe_now).tv_sec + 2208988800),
.ntp_lsw = htonl((4294967295ULL * timeval_from_us(rtpe_now).tv_usec) / 1000000ULL),
.timestamp = htonl(ts), // XXX calculate from rtpe_now instead
.packet_count = htonl(packets),
.octet_count = htonl(octets),
@ -1430,8 +1430,8 @@ static GString *rtcp_sender_report(struct ssrc_sender_report *ssr,
if (ssr) {
*ssr = (struct ssrc_sender_report) {
.ssrc = ssrc_out,
.ntp_msw = rtpe_now.tv_sec + 2208988800,
.ntp_lsw = (4294967295ULL * rtpe_now.tv_usec) / 1000000ULL,
.ntp_msw = timeval_from_us(rtpe_now).tv_sec + 2208988800,
.ntp_lsw = (4294967295ULL * timeval_from_us(rtpe_now).tv_usec) / 1000000ULL,
.timestamp = ts, // XXX calculate from rtpe_now instead
.packet_count = packets,
.octet_count = octets,
@ -1455,7 +1455,7 @@ static GString *rtcp_sender_report(struct ssrc_sender_report *ssr,
mutex_lock(&se->h.lock);
if (se->sender_reports.length) {
struct ssrc_time_item *si = se->sender_reports.tail->data;
tv_diff = timeval_diff(&rtpe_now, &si->received);
tv_diff = timeval_diff(timeval_from_us(rtpe_now), si->received);
ntp_middle_bits = si->ntp_middle_bits;
}
uint32_t jitter = se->jitter;
@ -1609,10 +1609,10 @@ void rtcp_send_report(struct call_media *media, struct ssrc_ctx *ssrc_out) {
struct packet_stream *sink = sh->sink;
struct call_media *other_media = sink->media;
ssrc_sender_report(other_media, &ssr, &rtpe_now);
ssrc_sender_report(other_media, &ssr, timeval_from_us(rtpe_now));
for (GList *k = srrs.head; k; k = k->next) {
struct ssrc_receiver_report *srr = k->data;
ssrc_receiver_report(other_media, sink->selected_sfd, srr, &rtpe_now);
ssrc_receiver_report(other_media, sink->selected_sfd, srr, timeval_from_us(rtpe_now));
}
}
while (srrs.length) {

@ -32,7 +32,7 @@ static void init_ssrc_ctx(struct ssrc_ctx *c, struct ssrc_entry_call *parent) {
while (!c->ssrc_map_out)
c->ssrc_map_out = ssl_random();
c->seq_out = ssl_random();
atomic64_set_na(&c->last_sample, ssrc_timeval_to_ts(&rtpe_now));
atomic64_set_na(&c->last_sample, ssrc_timeval_to_ts(timeval_from_us(rtpe_now)));
c->stats = bufferpool_alloc0(shm_bufferpool, sizeof(*c->stats));
}
static void init_ssrc_entry(struct ssrc_entry *ent, uint32_t ssrc) {
@ -308,14 +308,14 @@ struct ssrc_ctx *get_ssrc_ctx(uint32_t ssrc, struct ssrc_hash *ht, enum ssrc_dir
static void *__do_time_report_item(struct call_media *m, size_t struct_size, size_t reports_queue_offset,
const struct timeval *tv, uint32_t ssrc, uint32_t ntp_msw, uint32_t ntp_lsw,
const struct timeval tv, uint32_t ssrc, uint32_t ntp_msw, uint32_t ntp_lsw,
GDestroyNotify free_func, struct ssrc_entry **e_p)
{
struct ssrc_entry *e;
struct ssrc_time_item *sti;
sti = g_malloc0(struct_size);
sti->received = *tv;
sti->received = tv;
sti->ntp_middle_bits = ntp_msw << 16 | ntp_lsw >> 16;
sti->ntp_ts = ntp_ts_to_double(ntp_msw, ntp_lsw);
@ -356,7 +356,7 @@ static struct ssrc_entry_call *hunt_ssrc(struct call_media *media, uint32_t ssrc
#define calc_rtt(m, ...) \
__calc_rtt(m, (struct crtt_args) {__VA_ARGS__})
static long long __calc_rtt(struct call_media *m, struct crtt_args a)
static int64_t __calc_rtt(struct call_media *m, struct crtt_args a)
{
if (a.pt_p)
*a.pt_p = -1;
@ -398,12 +398,12 @@ static long long __calc_rtt(struct call_media *m, struct crtt_args a)
found:;
// `e` remains locked for access to `sti`
long long rtt = timeval_diff(a.tv, &sti->received);
int64_t rtt = timeval_diff(a.tv, sti->received);
mutex_unlock(&e->h.lock);
rtt -= (long long) a.delay * 1000000LL / 65536LL;
ilog(LOG_DEBUG, "Calculated round-trip time for %s%x%s is %lli us", FMT_M(a.ssrc), rtt);
rtt -= (int64_t) a.delay * 1000000LL / 65536LL;
ilog(LOG_DEBUG, "Calculated round-trip time for %s%x%s is %" PRId64 " us", FMT_M(a.ssrc), rtt);
if (rtt <= 0 || rtt > 10000000) {
ilog(LOG_DEBUG, "Invalid RTT - discarding");
@ -418,7 +418,7 @@ found:;
}
void ssrc_sender_report(struct call_media *m, const struct ssrc_sender_report *sr,
const struct timeval *tv)
const struct timeval tv)
{
struct ssrc_entry *e;
struct ssrc_sender_report_item *seri = __do_time_report_item(m, sizeof(*seri),
@ -437,7 +437,7 @@ void ssrc_sender_report(struct call_media *m, const struct ssrc_sender_report *s
obj_put(e);
}
void ssrc_receiver_report(struct call_media *m, stream_fd *sfd, const struct ssrc_receiver_report *rr,
const struct timeval *tv)
const struct timeval tv)
{
ilog(LOG_DEBUG, "RR from %s%x%s about %s%x%s: FL %u TL %u HSR %u J %u LSR %u DLSR %u",
FMT_M(rr->from), FMT_M(rr->ssrc), rr->fraction_lost, rr->packets_lost,
@ -445,7 +445,7 @@ void ssrc_receiver_report(struct call_media *m, stream_fd *sfd, const struct ssr
int pt;
long long rtt = calc_rtt(m,
int64_t rtt = calc_rtt(m,
.ht = &m->ssrc_hash,
.tv = tv,
.pt_p = &pt,
@ -474,16 +474,16 @@ void ssrc_receiver_report(struct call_media *m, stream_fd *sfd, const struct ssr
ilog(LOG_DEBUG, "Adding opposide side RTT of %u us", other_e->last_rtt);
long long rtt_end2end = other_e->last_rtt ? (rtt + other_e->last_rtt) : 0;
int64_t rtt_end2end = other_e->last_rtt ? (rtt + other_e->last_rtt) : 0;
if (other_e->last_rtt_xr > 0) { // use the RTT from RTCP-XR (in ms)
rtt_end2end = (long long) other_e->last_rtt_xr * 1000LL;
rtt_end2end = (int64_t) other_e->last_rtt_xr * 1000LL;
}
struct ssrc_stats_block *ssb = g_new(__typeof(*ssb), 1);
*ssb = (struct ssrc_stats_block) {
.jitter = jitter,
.rtt = rtt_end2end,
.rtt_leg = rtt,
.reported = *tv,
.reported = tv,
.packetloss = (unsigned int) rr->fraction_lost * 100 / 256,
};
@ -515,7 +515,7 @@ void ssrc_receiver_report(struct call_media *m, stream_fd *sfd, const struct ssr
// discard stats block if last has been received less than a second ago
if (G_LIKELY(other_e->stats_blocks.length > 0)) {
struct ssrc_stats_block *last_ssb = g_queue_peek_tail(&other_e->stats_blocks);
if (G_UNLIKELY(timeval_diff(tv, &last_ssb->reported) < 1000000)) {
if (G_UNLIKELY(timeval_diff(tv, last_ssb->reported) < 1000000LL)) {
free_stats_block(ssb);
goto out_ul_oe;
}
@ -552,7 +552,7 @@ out_nl:
}
void ssrc_receiver_rr_time(struct call_media *m, const struct ssrc_xr_rr_time *rr,
const struct timeval *tv)
const struct timeval tv)
{
struct ssrc_entry *e;
struct ssrc_rr_time_item *srti = __do_time_report_item(m, sizeof(*srti),
@ -570,7 +570,7 @@ void ssrc_receiver_rr_time(struct call_media *m, const struct ssrc_xr_rr_time *r
}
void ssrc_receiver_dlrr(struct call_media *m, const struct ssrc_xr_dlrr *dlrr,
const struct timeval *tv)
const struct timeval tv)
{
ilog(LOG_DEBUG, "XR DLRR from %s%x%s about %s%x%s: LRR %u DLRR %u",
FMT_M(dlrr->from), FMT_M(dlrr->ssrc),
@ -587,7 +587,7 @@ void ssrc_receiver_dlrr(struct call_media *m, const struct ssrc_xr_dlrr *dlrr,
}
void ssrc_voip_metrics(struct call_media *m, const struct ssrc_xr_voip_metrics *vm,
const struct timeval *tv)
const struct timeval tv)
{
ilog(LOG_DEBUG, "XR VM from %s%x%s about %s%x%s: LR %u DR %u BD %u GD %u BDu %u GDu %u RTD %u "
"ESD %u SL %u NL %u RERL %u GMin %u R %u eR %u MOSL %u MOSC %u RX %u "

@ -183,8 +183,8 @@ found:;
RTPE_STATS_INC(managed_sess);
if (!c->destroyed.tv_sec)
c->destroyed = rtpe_now;
long long duration = timeval_diff(&c->destroyed, &c->created);
c->destroyed = timeval_from_us(rtpe_now);
int64_t duration = timeval_diff(c->destroyed, c->created);
RTPE_STATS_ADD(call_duration, duration);
duration /= 1000; // millisecond precision for the squared value to avoid overflows
RTPE_STATS_ADD(call_duration2, duration * duration);
@ -387,7 +387,7 @@ stats_metric_q *statistics_gather_metrics(struct interface_sampled_rate_stats *i
HEADER("totalstatistics", "Total statistics (does not include current running sessions):");
HEADER("{", "");
METRIC("uptime", "Uptime of rtpengine", "%llu", "%llu seconds", (long long) timeval_diff(&rtpe_now, &rtpe_started) / 1000000);
METRIC("uptime", "Uptime of rtpengine", "%" PRId64, "%" PRId64 " seconds", timeval_diff(timeval_from_us(rtpe_now), rtpe_started) / 1000000);
PROM("uptime_seconds", "gauge");
METRIC("managedsessions", "Total managed sessions", "%" PRIu64, "%" PRIu64, num_sessions);
@ -697,7 +697,7 @@ stats_metric_q *statistics_gather_metrics(struct interface_sampled_rate_stats *i
#undef F
// expected to be single thread only, so no locking
long long time_diff_us;
int64_t time_diff_us;
struct interface_stats_block *intv_stats
= interface_sampled_rate_stats_get(interface_rate_stats, lif, &time_diff_us);
@ -880,7 +880,7 @@ stats_metric_q *statistics_gather_metrics(struct interface_sampled_rate_stats *i
HEADER("transcoders", NULL);
HEADER("[", "");
int last_tv_sec = rtpe_now.tv_sec - 1;
int last_tv_sec = timeval_from_us(rtpe_now).tv_sec - 1;
unsigned int idx = last_tv_sec & 1;
codec_stats_ht_iter iter;
@ -1034,11 +1034,11 @@ enum thread_looper_action call_rate_stats_updater(void) {
stats_rate_min_max(&rtpe_rate_graphite_min_max, &rtpe_stats_rate);
if (last_run.tv_sec) { /* `stats_counters_calc_rate()` shouldn't be called on the very first cycle */
long long run_diff_us = timeval_diff(&rtpe_now, &last_run);
int64_t run_diff_us = timeval_diff(timeval_from_us(rtpe_now), last_run);
stats_counters_calc_rate(rtpe_stats, run_diff_us, &rtpe_stats_intv, &rtpe_stats_rate);
}
last_run = rtpe_now;
last_run = timeval_from_us(rtpe_now);
return TLA_CONTINUE;
}

@ -273,8 +273,8 @@ static bool t38_pcm_player(struct media_player *mp) {
if (num <= 0) {
ilog(LOG_DEBUG, "No T.38 PCM samples generated");
// use a fixed interval of 10 ms
timeval_add_usec(&mp->next_run, 10000);
timerthread_obj_schedule_abs(&mp->tt_obj, &mp->next_run);
mp->next_run = timeval_add_usec(mp->next_run, 10000);
timerthread_obj_schedule_abs(&mp->tt_obj, mp->next_run);
mutex_unlock(&tg->lock);
return false;
}
@ -288,7 +288,7 @@ static bool t38_pcm_player(struct media_player *mp) {
tg->pts += num;
// handle fill-in
if (timeval_diff(&rtpe_now, &tg->last_rx_ts) > 30000) {
if (timeval_diff(timeval_from_us(rtpe_now), tg->last_rx_ts) > 30000) {
ilog(LOG_DEBUG, "Adding T.38 fill-in samples");
t38_gateway_rx_fillin(tg->gw, 80);
}
@ -504,8 +504,8 @@ void t38_gateway_start(struct t38_gateway *tg, str_case_value_ht codec_set) {
ilog(LOG_DEBUG, "Starting T.38 PCM player");
// start off PCM player
tg->pcm_player->next_run = rtpe_now;
timerthread_obj_schedule_abs(&tg->pcm_player->tt_obj, &tg->pcm_player->next_run);
tg->pcm_player->next_run = timeval_from_us(rtpe_now);
timerthread_obj_schedule_abs(&tg->pcm_player->tt_obj, tg->pcm_player->next_run);
}
@ -525,7 +525,7 @@ int t38_gateway_input_samples(struct t38_gateway *tg, int16_t amp[], int len) {
ilog(LOG_WARN | LOG_FLAG_LIMIT, "%i PCM samples were not processed by the T.38 gateway",
left);
tg->last_rx_ts = rtpe_now;
tg->last_rx_ts = timeval_from_us(rtpe_now);
mutex_unlock(&tg->lock);

@ -57,14 +57,14 @@ static void timerthread_run(void *p) {
struct thread_waker waker = { .lock = &tt->lock, .cond = &tt->cond };
thread_waker_add(&waker);
long long accuracy = rtpe_config.timer_accuracy;
int64_t accuracy = rtpe_config.timer_accuracy;
mutex_lock(&tt->lock);
while (!rtpe_shutdown) {
gettimeofday(&rtpe_now, NULL);
rtpe_now = now_us();
long long sleeptime = 10000000;
int64_t sleeptime = 10000000;
// find the first element if we haven't determined it yet
struct timerthread_obj *tt_obj = tt->obj;
if (!tt_obj) {
@ -78,7 +78,7 @@ static void timerthread_run(void *p) {
}
// scheduled to run? if not, then we remember this object/reference and go to sleep
sleeptime = timeval_diff(&tt_obj->next_check, &rtpe_now);
sleeptime = timeval_diff(tt_obj->next_check, timeval_from_us(rtpe_now));
if (sleeptime > accuracy) {
tt->obj = tt_obj;
@ -86,9 +86,9 @@ static void timerthread_run(void *p) {
}
// pretend we're running exactly at the scheduled time
rtpe_now = tt_obj->next_check;
rtpe_now = timeval_us(tt_obj->next_check);
ZERO(tt_obj->next_check);
tt_obj->last_run = rtpe_now;
tt_obj->last_run = timeval_from_us(rtpe_now);
ZERO(tt->next_wake);
tt->obj = NULL;
mutex_unlock(&tt->lock);
@ -107,8 +107,8 @@ sleep:
/* figure out how long we should sleep */
sleeptime = MIN(10000000, sleeptime);
sleep_now:;
struct timeval tv = rtpe_now;
timeval_add_usec(&tv, sleeptime);
struct timeval tv = timeval_from_us(rtpe_now);
tv = timeval_add_usec(tv, sleeptime);
tt->next_wake = tv;
cond_timedwait(&tt->cond, &tt->lock, &tv);
}
@ -122,7 +122,7 @@ void timerthread_launch(struct timerthread *tt, const char *scheduler, int prio,
thread_create_detach_prio(timerthread_run, &tt->threads[i], scheduler, prio, name);
}
void timerthread_obj_schedule_abs_nl(struct timerthread_obj *tt_obj, const struct timeval *tv) {
void timerthread_obj_schedule_abs_nl(struct timerthread_obj *tt_obj, const struct timeval tv) {
if (!tt_obj)
return;
struct timerthread_thread *tt = tt_obj->thread;
@ -130,7 +130,7 @@ void timerthread_obj_schedule_abs_nl(struct timerthread_obj *tt_obj, const struc
//ilog(LOG_DEBUG, "scheduling timer object at %llu.%06lu", (unsigned long long) tv->tv_sec,
//(unsigned long) tv->tv_usec);
if (tt_obj->next_check.tv_sec && timeval_cmp(&tt_obj->next_check, tv) <= 0)
if (tt_obj->next_check.tv_sec && timeval_cmp(tt_obj->next_check, tv) <= 0)
return; /* already scheduled sooner */
if (!g_tree_remove(tt->tree, tt_obj)) {
if (tt->obj == tt_obj)
@ -138,10 +138,10 @@ void timerthread_obj_schedule_abs_nl(struct timerthread_obj *tt_obj, const struc
else
obj_hold(tt_obj); /* if it wasn't removed, we make a new reference */
}
tt_obj->next_check = *tv;
tt_obj->next_check = tv;
g_tree_insert(tt->tree, tt_obj, tt_obj);
// need to wake the thread?
if (tt->next_wake.tv_sec && timeval_cmp(tv, &tt->next_wake) < 0) {
if (tt->next_wake.tv_sec && timeval_cmp(tv, tt->next_wake) < 0) {
// make sure we can get picked first: move pre-picked object back into tree
if (tt->obj && tt->obj != tt_obj) {
g_tree_insert(tt->tree, tt->obj, tt->obj);
@ -179,8 +179,8 @@ nope:
static int timerthread_queue_run_one(struct timerthread_queue *ttq,
struct timerthread_queue_entry *ttqe,
void (*run_func)(struct timerthread_queue *, void *)) {
if (ttqe->when.tv_sec && timeval_cmp(&ttqe->when, &rtpe_now) > 0) {
if(timeval_diff(&ttqe->when, &rtpe_now) > 1000) // not to queue packet less than 1ms
if (ttqe->when.tv_sec && timeval_cmp(ttqe->when, timeval_from_us(rtpe_now)) > 0) {
if(timeval_diff(ttqe->when, timeval_from_us(rtpe_now)) > 1000) // not to queue packet less than 1ms
return -1; // not yet
}
run_func(ttq, ttqe);
@ -219,7 +219,7 @@ void timerthread_queue_run(void *ptr) {
mutex_unlock(&ttq->lock);
if (next_send.tv_sec)
timerthread_obj_schedule_abs(&ttq->tt_obj, &next_send);
timerthread_obj_schedule_abs(&ttq->tt_obj, next_send);
}
static int ttqe_free_all(void *k, void *v, void *d) {
@ -277,7 +277,7 @@ int __ttqe_find_last_idx(const void *a, const void *b) {
const struct timerthread_queue_entry *ttqe_a = a;
void **data = (void **) b;
const struct timerthread_queue_entry *ttqe_b = data[0];
int ret = timeval_cmp(&ttqe_b->when, &ttqe_a->when);
int ret = timeval_cmp(ttqe_b->when, ttqe_a->when);
if (ret)
return ret;
// same timestamp. track highest seen idx
@ -329,7 +329,7 @@ void timerthread_queue_push(struct timerthread_queue *ttq, struct timerthread_qu
// first packet in? we're probably not scheduled yet
if (first_ttqe == ttqe)
timerthread_obj_schedule_abs(&ttq->tt_obj, &tv_send);
timerthread_obj_schedule_abs(&ttq->tt_obj, tv_send);
}
static int ttqe_ptr_match(const void *ent, const void *ptr) {

@ -238,7 +238,7 @@ static void websocket_process(void *p, void *up) {
assert(wm != NULL);
gettimeofday(&rtpe_now, NULL);
rtpe_now = now_us();
const char *err = wm->func(wm);
// this may trigger a cleanup/free in another thread, which will then block until our
@ -835,7 +835,7 @@ static int websocket_http(struct lws *wsi, enum lws_callback_reasons reason, voi
{
ilogs(http, LOG_DEBUG, "http-only callback %i %p %p", reason, wsi, user);
gettimeofday(&rtpe_now, NULL);
rtpe_now = now_us();
switch (reason) {
case LWS_CALLBACK_PROTOCOL_INIT:
@ -907,7 +907,7 @@ static int websocket_protocol(struct lws *wsi, enum lws_callback_reasons reason,
ilogs(http, LOG_DEBUG, "Websocket protocol '%s' callback %i %p %p", name, reason, wsi, wc);
gettimeofday(&rtpe_now, NULL);
rtpe_now = now_us();
switch (reason) {
case LWS_CALLBACK_PROTOCOL_INIT:

@ -151,7 +151,7 @@ struct codec_handler *codec_handler_make_media_player(const rtp_payload_type *sr
str_case_value_ht codec_set);
struct codec_handler *codec_handler_make_dummy(const rtp_payload_type *dst_pt, struct call_media *media,
str_case_value_ht codec_set);
void codec_calc_jitter(struct ssrc_ctx *, unsigned long ts, unsigned int clockrate, const struct timeval *);
void codec_calc_jitter(struct ssrc_ctx *, unsigned long ts, unsigned int clockrate, const struct timeval);
void codec_update_all_handlers(struct call_monologue *ml);
void codec_update_all_source_handlers(struct call_monologue *ml, const sdp_ng_flags *flags);

@ -135,7 +135,7 @@ bool media_player_pt_match(const struct media_player *mp, const rtp_payload_type
const rtp_payload_type *dst_pt);
void media_player_add_packet(struct media_player *mp, char *buf, size_t len,
long long us_dur, unsigned long long pts);
int64_t us_dur, unsigned long long pts);
const char * call_play_media_for_ml(struct call_monologue *ml,
media_player_opts_t opts, sdp_ng_flags *flags);

@ -154,13 +154,13 @@ INLINE void interface_counter_calc_diff_dir(const struct interface_counter_stats
#include "interface_counter_stats_fields_dir.inc"
#undef F
}
INLINE void interface_counter_calc_rate_from_diff(long long run_diff_us,
INLINE void interface_counter_calc_rate_from_diff(int64_t run_diff_us,
struct interface_counter_stats *diff, struct interface_counter_stats *rate) {
#define F(x) atomic64_calc_rate_from_diff(run_diff_us, atomic64_get(&diff->x), &rate->x);
#include "interface_counter_stats_fields.inc"
#undef F
}
INLINE void interface_counter_calc_rate_from_diff_dir(long long run_diff_us,
INLINE void interface_counter_calc_rate_from_diff_dir(int64_t run_diff_us,
struct interface_counter_stats_dir *diff, struct interface_counter_stats_dir *rate) {
#define F(x) atomic64_calc_rate_from_diff(run_diff_us, atomic64_get(&diff->x), &rate->x);
#include "interface_counter_stats_fields_dir.inc"
@ -169,7 +169,7 @@ INLINE void interface_counter_calc_rate_from_diff_dir(long long run_diff_us,
void interface_sampled_rate_stats_init(struct interface_sampled_rate_stats *);
void interface_sampled_rate_stats_destroy(struct interface_sampled_rate_stats *);
struct interface_stats_block *interface_sampled_rate_stats_get(struct interface_sampled_rate_stats *s,
struct local_intf *lif, long long *time_diff_us);
struct local_intf *lif, int64_t *time_diff_us);
TYPED_GQUEUE(socket_port, struct socket_port_link)

@ -64,8 +64,8 @@ struct ssrc_ctx {
struct timeval next_rtcp; // for self-generated RTCP reports
};
INLINE uint64_t ssrc_timeval_to_ts(const struct timeval *tv) {
return (tv->tv_sec << 20) | tv->tv_usec;
INLINE uint64_t ssrc_timeval_to_ts(const struct timeval tv) {
return (tv.tv_sec << 20) | tv.tv_usec;
}
INLINE struct timeval ssrc_ts_to_timeval(uint64_t ts) {
return (struct timeval) { .tv_sec = ts >> 20, .tv_usec = ts & 0xfffff };
@ -196,8 +196,8 @@ struct ssrc_xr_voip_metrics {
struct crtt_args {
struct ssrc_hash *ht;
const struct timeval *tv;
int * pt_p;
const struct timeval tv;
int *pt_p;
uint32_t ssrc;
uint32_t ntp_middle_bits;
uint32_t delay;
@ -221,15 +221,15 @@ INLINE void *get_ssrc(uint32_t ssrc, struct ssrc_hash *ht) {
struct ssrc_ctx *get_ssrc_ctx(uint32_t, struct ssrc_hash *, enum ssrc_dir); // creates new entry if not found
void ssrc_sender_report(struct call_media *, const struct ssrc_sender_report *, const struct timeval *);
void ssrc_sender_report(struct call_media *, const struct ssrc_sender_report *, const struct timeval);
void ssrc_receiver_report(struct call_media *, stream_fd *, const struct ssrc_receiver_report *,
const struct timeval *);
const struct timeval);
void ssrc_receiver_rr_time(struct call_media *m, const struct ssrc_xr_rr_time *rr,
const struct timeval *);
const struct timeval);
void ssrc_receiver_dlrr(struct call_media *m, const struct ssrc_xr_dlrr *dlrr,
const struct timeval *);
const struct timeval);
void ssrc_voip_metrics(struct call_media *m, const struct ssrc_xr_voip_metrics *vm,
const struct timeval *);
const struct timeval);
void ssrc_collect_metrics(struct call_media *);

@ -180,7 +180,7 @@ enum thread_looper_action call_rate_stats_updater(void);
* Calculation of the call rate counters.
* If used with the `stats_rate_min_max()` must only be called in advance, so before that.
*/
INLINE void stats_counters_calc_rate(const struct global_stats_counter *stats, long long run_diff_us,
INLINE void stats_counters_calc_rate(const struct global_stats_counter *stats, int64_t run_diff_us,
struct global_stats_counter *intv, struct global_stats_counter *rate)
{
#define F(x) atomic64_calc_rate(&stats->x, run_diff_us, &intv->x, &rate->x);
@ -216,7 +216,7 @@ INLINE void stats_rate_min_max(struct global_rate_min_max *mm, struct global_sta
// sample running min/max from `mm` into `loc` and reset `mm` to zero.
// calculate average values in `loc` from `counter_diff` and `time_diff_us`
INLINE void stats_rate_min_max_avg_sample(struct global_rate_min_max *mm, struct global_rate_min_max_avg *loc,
long long run_diff_us, const struct global_stats_counter *counter_diff) {
int64_t run_diff_us, const struct global_stats_counter *counter_diff) {
#define F(x) STAT_MIN_MAX_AVG(x, mm, loc, run_diff_us, counter_diff)
#define FA(x, n) for (int i = 0; i < n; i++) { F(x[i]) }
#include "counter_stats_fields.inc"

@ -57,7 +57,7 @@ void timerthread_init(struct timerthread *, unsigned int, void (*)(void *));
void timerthread_free(struct timerthread *);
void timerthread_launch(struct timerthread *, const char *scheduler, int prio, const char *name);
void timerthread_obj_schedule_abs_nl(struct timerthread_obj *, const struct timeval *);
void timerthread_obj_schedule_abs_nl(struct timerthread_obj *, const struct timeval);
void timerthread_obj_deschedule(struct timerthread_obj *);
// run_now_func = called if newly inserted object can be processed immediately by timerthread_queue_push within its calling context
@ -79,7 +79,7 @@ INLINE struct timerthread_thread *timerthread_get_next(struct timerthread *tt) {
return &tt->threads[idx];
}
INLINE void timerthread_obj_schedule_abs(struct timerthread_obj *tt_obj, const struct timeval *tv) {
INLINE void timerthread_obj_schedule_abs(struct timerthread_obj *tt_obj, const struct timeval tv) {
if (!tt_obj)
return;
struct timerthread_thread *tt = tt_obj->thread;

@ -25,7 +25,7 @@ struct thread_buf {
};
struct rtpengine_common_config *rtpe_common_config_ptr;
__thread struct timeval rtpe_now;
__thread int64_t rtpe_now;
volatile bool rtpe_shutdown;
static __thread struct thread_buf t_bufs[NUM_THREAD_BUFS];
@ -548,7 +548,7 @@ int timeval_cmp_zero(const void *a, const void *b) {
if (A->tv_sec == 0 && B->tv_sec == 0)
return 0;
/* earlier timevals go first */
return timeval_cmp(A, B);
return timeval_cmp(*A, *B);
}
int timeval_cmp_ptr(const void *a, const void *b) {

@ -11,6 +11,7 @@
#include <sys/resource.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/time.h>
#define THREAD_BUF_SIZE 64
@ -85,7 +86,7 @@ struct rtpenging_config_callback {
/*** GLOBALS ***/
extern __thread struct timeval rtpe_now;
extern __thread int64_t rtpe_now;
extern volatile bool rtpe_shutdown;
@ -344,24 +345,37 @@ G_DEFINE_AUTOPTR_CLEANUP_FUNC(rwlock_w_lock_t, rwlock_ptr_unlock_w)
/*** TIMEVAL FUNCTIONS ***/
INLINE long long timeval_us(const struct timeval *t) {
return (long long) ((long long) t->tv_sec * 1000000LL) + t->tv_usec;
__attribute__((warn_unused_result))
INLINE int64_t timeval_us(const struct timeval t) {
return ((int64_t) t.tv_sec * 1000000LL) + t.tv_usec;
}
INLINE struct timeval timeval_from_us(long long ms) {
/* Returns the current wall-clock time as microseconds since the epoch,
 * by sampling gettimeofday() and converting via timeval_us(). */
__attribute__((warn_unused_result))
INLINE int64_t now_us(void) {
	struct timeval tv;
	gettimeofday(&tv, NULL);
	return timeval_us(tv);
}
/* Converts a count of microseconds to a struct timeval.
 * Parameter renamed from the misleading "ms": the value is microseconds
 * (this is the inverse of timeval_us()), not milliseconds.
 * NOTE(review): for negative inputs C truncating division/modulo yields a
 * negative tv_usec rather than a normalized timeval - callers appear to
 * pass non-negative timestamps only; TODO confirm. */
__attribute__((warn_unused_result))
INLINE struct timeval timeval_from_us(int64_t us) {
	return (struct timeval) { .tv_sec = us/1000000LL, .tv_usec = us%1000000LL };
}
INLINE long long timeval_diff(const struct timeval *a, const struct timeval *b) {
/* Returns the difference (a - b) expressed in microseconds. */
__attribute__((warn_unused_result))
INLINE int64_t timeval_diff(const struct timeval a, const struct timeval b) {
	const int64_t a_us = timeval_us(a);
	const int64_t b_us = timeval_us(b);
	return a_us - b_us;
}
INLINE struct timeval timeval_subtract(const struct timeval *a, const struct timeval *b) {
/* Returns (a - b) as a struct timeval. */
__attribute__((warn_unused_result))
INLINE struct timeval timeval_subtract(const struct timeval a, const struct timeval b) {
	const int64_t delta_us = timeval_diff(a, b);
	return timeval_from_us(delta_us);
}
INLINE struct timeval timeval_add(const struct timeval *a, const struct timeval *b) {
/* Returns (a + b) as a struct timeval. */
__attribute__((warn_unused_result))
INLINE struct timeval timeval_add(const struct timeval a, const struct timeval b) {
	const int64_t sum_us = timeval_us(a) + timeval_us(b);
	return timeval_from_us(sum_us);
}
INLINE void timeval_add_usec(struct timeval *tv, long usec) {
*tv = timeval_from_us(timeval_us(tv) + usec);
/* Returns tv advanced by usec microseconds (usec may be negative). */
__attribute__((warn_unused_result))
INLINE struct timeval timeval_add_usec(const struct timeval tv, int64_t usec) {
	const int64_t shifted_us = timeval_us(tv) + usec;
	return timeval_from_us(shifted_us);
}
__attribute__((warn_unused_result))
INLINE int long_cmp(long long a, long long b) {
if (a == b)
return 0;
@ -369,22 +383,28 @@ INLINE int long_cmp(long long a, long long b) {
return -1;
return 1;
}
INLINE int timeval_cmp(const struct timeval *a, const struct timeval *b) {
int r = long_cmp(a->tv_sec, b->tv_sec);
__attribute__((warn_unused_result))
INLINE int timeval_cmp(const struct timeval a, const struct timeval b) {
int r = long_cmp(a.tv_sec, b.tv_sec);
if (r != 0)
return r;
return long_cmp(a->tv_usec, b->tv_usec);
return long_cmp(a.tv_usec, b.tv_usec);
}
// as a GCompareFunc
__attribute__((warn_unused_result))
int timeval_cmp_zero(const void *a, const void *b);
__attribute__((warn_unused_result))
int timeval_cmp_ptr(const void *a, const void *b);
INLINE void timeval_lowest(struct timeval *l, const struct timeval *n) {
if (!n->tv_sec)
return;
if (!l->tv_sec || timeval_cmp(l, n) == 1)
*l = *n;
/* Returns the earlier of two timestamps, treating tv_sec == 0 as "unset":
 * an unset `n` never replaces `l`, while an unset `l` is always replaced. */
__attribute__((warn_unused_result))
INLINE struct timeval timeval_lowest(const struct timeval l, const struct timeval n) {
	if (n.tv_sec == 0)
		return l;
	if (l.tv_sec == 0)
		return n;
	return (timeval_cmp(l, n) == 1) ? n : l;
}
__attribute__((warn_unused_result))
INLINE double ntp_ts_to_double(uint32_t whole, uint32_t frac) {
return (double) whole + (double) frac / 4294967296.0;
}

@ -2932,7 +2932,7 @@ static void amr_bitrate_tracker(decoder_t *dec, unsigned int ft) {
return;
if (dec->avc.amr.tracker_end.tv_sec
&& timeval_cmp(&dec->avc.amr.tracker_end, &rtpe_now) >= 0) {
&& timeval_cmp(dec->avc.amr.tracker_end, timeval_from_us(rtpe_now)) >= 0) {
// analyse the data we gathered
int next_highest = -1;
int lowest_used = -1;
@ -2973,8 +2973,8 @@ static void amr_bitrate_tracker(decoder_t *dec, unsigned int ft) {
if (!dec->avc.amr.tracker_end.tv_sec) {
// init
ZERO(dec->avc.amr.bitrate_tracker);
dec->avc.amr.tracker_end = rtpe_now;
timeval_add_usec(&dec->avc.amr.tracker_end, dec->codec_options.amr.cmr_interval * 1000);
dec->avc.amr.tracker_end = timeval_from_us(rtpe_now);
dec->avc.amr.tracker_end = timeval_add_usec(dec->avc.amr.tracker_end, dec->codec_options.amr.cmr_interval * 1000);
}
dec->avc.amr.bitrate_tracker[ft]++;
@ -3000,17 +3000,17 @@ static int amr_decoder_input(decoder_t *dec, const str *data, GQueue *out) {
unsigned int cmr_int = cmr_chr[0] >> 4;
if (cmr_int != 15) {
decoder_event(dec, CE_AMR_CMR_RECV, GUINT_TO_POINTER(cmr_int));
dec->avc.amr.last_cmr = rtpe_now;
dec->avc.amr.last_cmr = timeval_from_us(rtpe_now);
}
else if (dec->codec_options.amr.mode_change_interval) {
// no CMR, check if we're due to do our own mode change
if (!dec->avc.amr.last_cmr.tv_sec) // start tracking now
dec->avc.amr.last_cmr = rtpe_now;
else if (timeval_diff(&rtpe_now, &dec->avc.amr.last_cmr)
dec->avc.amr.last_cmr = timeval_from_us(rtpe_now);
else if (timeval_diff(timeval_from_us(rtpe_now), dec->avc.amr.last_cmr)
>= (long long) dec->codec_options.amr.mode_change_interval * 1000) {
// switch up if we can
decoder_event(dec, CE_AMR_CMR_RECV, GUINT_TO_POINTER(0xffff));
dec->avc.amr.last_cmr = rtpe_now;
dec->avc.amr.last_cmr = timeval_from_us(rtpe_now);
}
}

@ -216,7 +216,7 @@ void thread_waker_del(struct thread_waker *);
void threads_join_all(bool cancel);
void thread_create_detach_prio(void (*)(void *), void *, const char *, int, const char *);
void thread_create_looper(enum thread_looper_action (*f)(void), const char *scheduler, int priority,
const char *name, long long);
const char *name, int64_t);
INLINE void thread_create_detach(void (*f)(void *), void *a, const char *name) {
thread_create_detach_prio(f, a, NULL, 0, name);
}

@ -250,11 +250,11 @@ static void mix_buffer_src_init_pos(struct mix_buffer *mb, mix_buffer_ssrc_sourc
static void mix_buff_src_shift_delay(struct mix_buffer *mb, mix_buffer_ssrc_source *src,
const struct timeval *last, const struct timeval *now)
const struct timeval last, const struct timeval now)
{
if (!last || !now)
if (!last.tv_sec || !now.tv_sec)
return;
long long diff_us = timeval_diff(now, last);
int64_t diff_us = timeval_diff(now, last);
if (diff_us <= 0)
return;
unsigned int samples = mb->clockrate * diff_us / 1000000;
@ -265,7 +265,7 @@ static void mix_buff_src_shift_delay(struct mix_buffer *mb, mix_buffer_ssrc_sour
// takes the difference between two time stamps into account, scaled to the given clock rate,
// to add an additional write-delay for a newly created source
bool mix_buffer_write_delay(struct mix_buffer *mb, uint32_t ssrc, const void *buf, unsigned int samples,
const struct timeval *last, const struct timeval *now)
const struct timeval last, const struct timeval now)
{
LOCK(&mb->lock);

@ -72,10 +72,10 @@ void mix_buffer_destroy(struct mix_buffer *);
void *mix_buffer_read_fast(struct mix_buffer *, unsigned int samples, unsigned int *size);
void mix_buffer_read_slow(struct mix_buffer *, void *outbuf, unsigned int samples);
bool mix_buffer_write_delay(struct mix_buffer *, uint32_t ssrc, const void *buf, unsigned int samples,
const struct timeval *, const struct timeval *);
const struct timeval, const struct timeval);
INLINE bool mix_buffer_write(struct mix_buffer *mb, uint32_t ssrc, const void *buf, unsigned int samples) {
return mix_buffer_write_delay(mb, ssrc, buf, samples, NULL, NULL);
return mix_buffer_write_delay(mb, ssrc, buf, samples, (struct timeval) {0,0}, (struct timeval) {0,0});
}

@ -199,7 +199,7 @@ static int poller_poll(struct poller *p, int timeout, struct epoll_event *evs, i
if (ret <= 0)
goto out;
gettimeofday(&rtpe_now, NULL);
rtpe_now = now_us();
for (i = 0; i < ret; i++) {
ev = &evs[i];

@ -38,7 +38,7 @@ struct streambuf *streambuf_new_ptr(struct poller *p, void *fd_ptr, const struct
b->buf = g_string_new("");
b->fd_ptr = fd_ptr;
b->poller = p;
b->active = rtpe_now.tv_sec;
b->active = timeval_from_us(rtpe_now).tv_sec;
b->funcs = funcs;
return b;
@ -79,7 +79,7 @@ int streambuf_writeable(struct streambuf *b) {
if (ret > 0) {
g_string_erase(b->buf, 0, ret);
b->active = rtpe_now.tv_sec;
b->active = timeval_from_us(rtpe_now).tv_sec;
}
if (ret != out) {
@ -118,7 +118,7 @@ int streambuf_readable(struct streambuf *b) {
}
g_string_append_len(b->buf, buf, ret);
b->active = rtpe_now.tv_sec;
b->active = timeval_from_us(rtpe_now).tv_sec;
}
mutex_unlock(&b->lock);
@ -233,7 +233,7 @@ void streambuf_write(struct streambuf *b, const char *s, unsigned int len) {
s += ret;
len -= ret;
b->active = rtpe_now.tv_sec;
b->active = timeval_from_us(rtpe_now).tv_sec;
}
if (b->buf->len > 5242880)

@ -245,11 +245,6 @@ static pthread_t thread_new(const char *name, void *(*fn)(void *), void *p) {
// Converts a duration given in microseconds to scheduler/clock ticks using
// the global `ticks_per_sec` rate (set elsewhere — presumably from
// sysconf(_SC_CLK_TCK); TODO confirm).
// Multiplying before dividing preserves precision; NOTE(review): this can
// overflow `long long` for extremely large `val` — acceptable for realistic
// time spans, but worth confirming for callers passing unbounded values.
static inline long long us_ticks_scale(long long val) {
	return val * ticks_per_sec / 1000000;
}
// Returns the current wall-clock time as microseconds since the epoch,
// by sampling gettimeofday() and converting via timeval_us().
static inline long long now_us(void) {
	struct timeval tv;
	gettimeofday(&tv, NULL);
	return timeval_us(&tv);
}
// stream is locked

@ -123,7 +123,7 @@ unsigned int mix_get_index(mix_t *mix, void *ptr, unsigned int media_sdp_id, uns
struct timeval earliest = {0,};
next = 0;
for (unsigned int i = 0; i < mix_num_inputs; i++) {
if ((earliest.tv_sec == 0 || timeval_cmp(&earliest, &mix->last_use[i]) > 0) &&
if ((earliest.tv_sec == 0 || timeval_cmp(earliest, mix->last_use[i]) > 0) &&
i % mix->channel_slots == stream_channel_slot) {
next = i;
earliest = mix->last_use[i];

@ -490,7 +490,7 @@ int main(void) {
struct timeval last = { 100, 200 };
struct timeval now = { 100, 15200 };
ret = mix_buffer_write_delay(&mb, 0x3333, (int16_t[]){11,22,33,44,55}, 5, &last, &now);
ret = mix_buffer_write_delay(&mb, 0x3333, (int16_t[]){11,22,33,44,55}, 5, last, now);
assert(ret == true);
// mix-in previous source

@ -81,7 +81,7 @@ int main(void) {
dtls_init();
// fake current time
rtpe_now = (struct timeval) {150,0};
rtpe_now = 150 * 1000000LL;
rtpe_started = (struct timeval) {80,0};
// test cmd_time_min/max/avg
@ -3642,7 +3642,7 @@ int main(void) {
stats_rate_min_max(&rtpe_rate_graphite_min_max, &rtpe_stats_rate);
RTPE_STATS_ADD(ng_commands[OP_OFFER], 100);
rtpe_now.tv_sec += 2;
rtpe_now += 2 * 1000000LL;
RTPE_STATS_ADD(ng_commands[OP_OFFER], 20);
call_timer();
@ -3650,7 +3650,7 @@ int main(void) {
stats_rate_min_max(&rtpe_rate_graphite_min_max, &rtpe_stats_rate);
// timer run time interval increased
rtpe_now.tv_sec += 5;
rtpe_now += 5 * 1000000LL;
RTPE_STATS_ADD(ng_commands[OP_OFFER], 200);
call_timer();
@ -6030,7 +6030,7 @@ int main(void) {
struct timeval graphite_interval_tv = {100,0};
rtpe_now = (struct timeval) {200,0};
rtpe_now = 200 * 1000000LL;
add_total_calls_duration_in_interval(&graphite_interval_tv);

Loading…
Cancel
Save