liblog/event_tag_map.c  (+1 −0)

@@ -120,6 +120,7 @@ LIBLOG_ABI_PUBLIC void android_closeEventTagMap(EventTagMap* map)
         return;
 
     munmap(map->mapAddr, map->mapLen);
+    free(map->tagArray);
     free(map);
 }
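The single added line plugs a leak: the close routine unmapped the tag file and freed the map struct, but never freed the tag array built on the heap while parsing the mapping. A minimal sketch of the ownership pattern, assuming a simplified struct layout (the real EventTagMap carries more fields than shown here):

    #include <stdlib.h>
    #include <sys/mman.h>

    /* Simplified stand-in for liblog's EventTagMap: the real struct has
     * more state, but the ownership relationships are the same. */
    typedef struct {
        void   *mapAddr;   /* mmap()ed tag file */
        size_t  mapLen;
        void   *tagArray;  /* heap-allocated index built over the mapping */
    } EventTagMap;

    void android_closeEventTagMap(EventTagMap *map)
    {
        if (!map)
            return;

        munmap(map->mapAddr, map->mapLen);
        free(map->tagArray);   /* the added line: this index was leaking */
        free(map);
    }

Every heap allocation the open path hands to the map object needs a matching free here, since this is the only teardown point.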
liblog/logger_write.c  (+32 −4)

@@ -132,12 +132,20 @@ LIBLOG_ABI_PUBLIC int __android_log_dev_available()
     }
     return kLogNotAvailable;
 }
 
+#if defined(__BIONIC__)
+static atomic_uintptr_t tagMap;
+#endif
+
 /*
  * Release any logger resources. A new log write will immediately re-acquire.
  */
 LIBLOG_ABI_PUBLIC void __android_log_close()
 {
     struct android_log_transport_write *transport;
+#if defined(__BIONIC__)
+    EventTagMap *m;
+#endif
 
     __android_log_lock();

@@ -165,7 +173,28 @@ LIBLOG_ABI_PUBLIC void __android_log_close()
         }
     }
 
+#if defined(__BIONIC__)
+    /*
+     * The additional risk here is somewhat mitigated by the unlock that
+     * immediately follows, which flushes the processor cache. The
+     * multi-threaded race that we choose to accept, to minimize locking,
+     * is an atomic_load in a writer picking up a value just prior to
+     * entering this routine; that would be a use-after-free.
+     *
+     * Again, anyone calling this to release the logging resources is most
+     * probably going to quiesce and then shut down, or to restart after a
+     * fork, so the risk should be non-existent. For this reason we choose
+     * mitigation for efficiency instead of incurring the cost of a lock on
+     * every log write.
+     */
+    m = (EventTagMap *)atomic_exchange(&tagMap, (uintptr_t)0);
+#endif
+
     __android_log_unlock();
+
+#if defined(__BIONIC__)
+    android_closeEventTagMap(m);
+#endif
 }
 
 /* log_init_lock assumed */

@@ -250,7 +279,6 @@ static int __write_to_log_daemon(log_id_t log_id, struct iovec *vec, size_t nr)
             return -EPERM;
         }
     } else if (log_id == LOG_ID_EVENTS) {
-        static atomic_uintptr_t map;
         const char *tag;
         EventTagMap *m, *f;

@@ -260,11 +288,11 @@ static int __write_to_log_daemon(log_id_t log_id, struct iovec *vec, size_t nr)
         tag = NULL;
         f = NULL;
-        m = (EventTagMap *)atomic_load(&map);
+        m = (EventTagMap *)atomic_load(&tagMap);
 
         if (!m) {
             ret = __android_log_trylock();
-            m = (EventTagMap *)atomic_load(&map); /* trylock flush cache */
+            m = (EventTagMap *)atomic_load(&tagMap); /* trylock flush cache */
             if (!m) {
                 m = android_openEventTagMap(EVENT_TAG_MAP_FILE);
                 if (ret) { /* trylock failed, use local copy, mark for close */

@@ -273,7 +301,7 @@ static int __write_to_log_daemon(log_id_t log_id, struct iovec *vec, size_t nr)
                 if (!m) { /* One chance to open map file */
                     m = (EventTagMap *)(uintptr_t)-1LL;
                 }
-                atomic_store(&map, (uintptr_t)m);
+                atomic_store(&tagMap, (uintptr_t)m);
             }
         }
         if (!ret) { /* trylock succeeded, unlock */
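The change promotes the function-local static to a file-scope tagMap so that __android_log_close() can hand the map off for teardown, and the new comment documents the deliberate tradeoff: the writer path avoids a lock on every event write, accepting a narrow use-after-free window for a caller that is about to quiesce or has just forked. Below is a minimal, self-contained sketch of the same pattern, assuming a stand-in TagMap type, hypothetical helper names (tag_map_open, get_tag_map, logging_close), and a pthread mutex in place of __android_log_lock()/__android_log_trylock(); it illustrates the technique, not liblog's actual implementation.

    /* Compile with: cc -std=c11 sketch_tagmap.c -lpthread */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { const char *path; } TagMap;  /* stand-in for EventTagMap */

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static atomic_uintptr_t tagMap;               /* 0 = unopened, -1 = open failed */

    static TagMap *tag_map_open(const char *path) {
        TagMap *m = malloc(sizeof(*m));
        if (m) m->path = path;
        return m;
    }

    static void tag_map_close(TagMap *m) {
        if (!m || m == (TagMap *)(uintptr_t)-1LL) return;
        free(m);
    }

    /* Writer side: lazily open the map once, dodging the lock on the hot
     * path. If the trylock fails, fall back to a private copy and tell the
     * caller (via *local_close) to close it after use. */
    static TagMap *get_tag_map(TagMap **local_close) {
        *local_close = NULL;
        TagMap *m = (TagMap *)atomic_load(&tagMap);
        if (!m) {
            int busy = pthread_mutex_trylock(&lock);  /* 0 on success */
            m = (TagMap *)atomic_load(&tagMap);       /* re-check after trylock */
            if (!m) {
                m = tag_map_open("/system/etc/event-log-tags");
                if (busy) {
                    *local_close = m;                 /* private copy, caller closes */
                } else {
                    if (!m)                           /* one open attempt only */
                        m = (TagMap *)(uintptr_t)-1LL;
                    atomic_store(&tagMap, (uintptr_t)m);
                }
            }
            if (!busy) pthread_mutex_unlock(&lock);
        }
        return (m == (TagMap *)(uintptr_t)-1LL) ? NULL : m;
    }

    /* Close side: swap the shared pointer out under the lock, free it only
     * after unlocking. A writer whose atomic_load raced just ahead of the
     * exchange can still touch freed memory -- the window the comment in
     * the diff chooses to accept. */
    static void logging_close(void) {
        pthread_mutex_lock(&lock);
        TagMap *m = (TagMap *)atomic_exchange(&tagMap, (uintptr_t)0);
        pthread_mutex_unlock(&lock);
        tag_map_close(m);
    }

    int main(void) {
        TagMap *f;
        TagMap *m = get_tag_map(&f);
        printf("map %sopened\n", m ? "" : "not ");
        tag_map_close(f);      /* close the local copy, if any */
        logging_close();
        return 0;
    }

The design choice worth noting is the pairing: the close path performs its atomic_exchange while holding the same lock that the writer's trylock re-check contends on, so the only unguarded window is a writer that loaded the pointer before the exchange. The -1 sentinel gives the open exactly one chance per process lifetime, so a missing tag file never turns the hot path back into a lock-and-open loop.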