Reorganize MagiskHide code

Prepare for zygote injection hiding
topjohnwu 2021-01-10 17:11:00 -08:00
parent 53c3dd5e8b
commit b36e6d987d
5 changed files with 72 additions and 76 deletions

View File

@@ -15,11 +15,43 @@
using namespace std;
static pthread_t proc_monitor_thread;
static pthread_t monitor_thread;
static bool hide_state = false;
static set<pair<string, string>> hide_set; /* set of <pkg, process> pair */
map<int, vector<string_view>> uid_proc_map; /* uid -> list of process */
// This locks the 2 variables above
static pthread_mutex_t hide_state_lock = PTHREAD_MUTEX_INITIALIZER;
// Locks the variables above
pthread_mutex_t hide_state_lock = PTHREAD_MUTEX_INITIALIZER;
void update_uid_map() {
mutex_guard lock(hide_state_lock);
uid_proc_map.clear();
string data_path(APP_DATA_DIR);
size_t len = data_path.length();
auto dir = open_dir(APP_DATA_DIR);
bool first_iter = true;
for (dirent *entry; (entry = xreaddir(dir.get()));) {
data_path.resize(len);
data_path += '/';
data_path += entry->d_name; // multiuser user id
data_path += '/';
size_t user_len = data_path.length();
struct stat st;
for (auto &hide : hide_set) {
if (hide.first == ISOLATED_MAGIC) {
if (!first_iter) continue;
// Setup isolated processes
uid_proc_map[-1].emplace_back(hide.second);
}
data_path.resize(user_len);
data_path += hide.first;
if (stat(data_path.data(), &st))
continue;
uid_proc_map[st.st_uid].emplace_back(hide.second);
}
first_iter = false;
}
}
// Leave /proc fd opened as we're going to read from it repeatedly
static DIR *procfp;
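To make the new shared map concrete, here is what update_uid_map() ends up building in a hypothetical setup (package name, process names, user id, and uid below are made up for illustration):

    // Hide list: <"com.example.app", "com.example.app:svc">
    //            <ISOLATED_MAGIC,    "com.example.app:isolated">
    // stat(<APP_DATA_DIR>/0/com.example.app) reports st_uid == 10123, so afterwards:
    //   uid_proc_map[-1]    == { "com.example.app:isolated" }  // ISOLATED_MAGIC entries, keyed by -1
    //   uid_proc_map[10123] == { "com.example.app:svc" }       // regular entries, keyed by the app's uid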
@@ -43,11 +75,6 @@ bool hide_enabled() {
return hide_state;
}
void set_hide_state(bool state) {
mutex_guard g(hide_state_lock);
hide_state = state;
}
template <bool str_op(string_view, string_view)>
static bool proc_name_match(int pid, const char *name) {
char buf[4019];
@@ -124,7 +151,7 @@ static int add_list(const char *pkg, const char *proc) {
{
// Critical region
mutex_guard lock(monitor_lock);
mutex_guard lock(hide_state_lock);
add_hide_set(pkg, proc);
}
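In case the helper is unfamiliar: mutex_guard is presumably an RAII lock over a pthread_mutex_t (in the spirit of std::lock_guard), so the critical regions in add_list and rm_list unlock automatically when the scope exits. A minimal sketch under that assumption:

    #include <pthread.h>

    // Sketch only; the real helper lives in Magisk's utility headers.
    class mutex_guard {
    public:
        explicit mutex_guard(pthread_mutex_t &m) : mutex(&m) { pthread_mutex_lock(mutex); }
        ~mutex_guard() { pthread_mutex_unlock(mutex); }
        mutex_guard(const mutex_guard &) = delete;
        mutex_guard &operator=(const mutex_guard &) = delete;
    private:
        pthread_mutex_t *mutex;
    };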
@@ -146,7 +173,7 @@ static int rm_list(const char *pkg, const char *proc) {
bool remove = false;
{
// Critical region
mutex_guard lock(monitor_lock);
mutex_guard lock(hide_state_lock);
for (auto it = hide_set.begin(); it != hide_set.end();) {
if (it->first == pkg && (proc[0] == '\0' || it->second == proc)) {
remove = true;
@@ -256,9 +283,6 @@ int launch_magiskhide() {
LOGI("* Starting MagiskHide\n");
// Initialize the mutex lock
pthread_mutex_init(&monitor_lock, nullptr);
// Initialize the hide list
if (!init_list())
return DAEMON_ERROR;
@@ -268,8 +292,7 @@ int launch_magiskhide() {
hide_late_sensitive_props();
// Start monitoring
void *(*start)(void*) = [](void*) -> void* { proc_monitor(); };
if (xpthread_create(&proc_monitor_thread, nullptr, start, nullptr))
if (new_daemon_thread(&proc_monitor))
return DAEMON_ERROR;
hide_state = true;
@@ -282,7 +305,9 @@ int stop_magiskhide() {
if (hide_state) {
LOGI("* Stopping MagiskHide\n");
pthread_kill(proc_monitor_thread, SIGTERMTHRD);
uid_proc_map.clear();
hide_set.clear();
pthread_kill(monitor_thread, SIGTERMTHRD);
}
hide_state = false;
@@ -292,7 +317,7 @@ int stop_magiskhide() {
void auto_start_magiskhide() {
if (hide_enabled()) {
pthread_kill(proc_monitor_thread, SIGALRM);
pthread_kill(monitor_thread, SIGALRM);
hide_late_sensitive_props();
} else if (SDK_INT >= 19) {
db_settings dbs;

View File

@@ -25,13 +25,12 @@ void ls_list(int client);
// Process monitoring
[[noreturn]] void proc_monitor();
void update_uid_map();
// Utility functions
void crawl_procfs(const std::function<bool (int)> &fn);
void crawl_procfs(DIR *dir, const std::function<bool (int)> &fn);
bool hide_enabled();
void set_hide_state(bool state);
void update_uid_map();
// Hide policies
void hide_daemon(int pid);
@@ -39,8 +38,8 @@ void hide_unmount(int pid = getpid());
void hide_sensitive_props();
void hide_late_sensitive_props();
extern pthread_mutex_t monitor_lock;
extern std::set<std::pair<std::string, std::string>> hide_set;
extern pthread_mutex_t hide_state_lock;
extern std::map<int, std::vector<std::string_view>> uid_proc_map;
enum {
LAUNCH_MAGISKHIDE,

View File

@@ -18,18 +18,11 @@ using namespace std;
static int inotify_fd = -1;
static void term_thread(int sig = SIGTERMTHRD);
static void new_zygote(int pid);
/**********************
* All data structures
**********************/
set<pair<string, string>> hide_set; /* set of <pkg, process> pair */
static map<int, struct stat> zygote_map; /* zygote pid -> mnt ns */
static map<int, vector<string_view>> uid_proc_map; /* uid -> list of process */
pthread_mutex_t monitor_lock;
/******************
* Data structures
******************/
#define PID_MAX 32768
struct pid_set {
@@ -41,7 +34,10 @@ private:
};
// true if pid is monitored
pid_set attaches;
static pid_set attaches;
// zygote pid -> mnt ns
static map<int, struct stat> zygote_map;
/********
* Utils
@@ -69,36 +65,6 @@ static int parse_ppid(int pid) {
return ppid;
}
void update_uid_map() {
mutex_guard lock(monitor_lock);
uid_proc_map.clear();
string data_path(APP_DATA_DIR);
size_t len = data_path.length();
auto dir = open_dir(APP_DATA_DIR);
bool first_iter = true;
for (dirent *entry; (entry = xreaddir(dir.get()));) {
data_path.resize(len);
data_path += '/';
data_path += entry->d_name; // multiuser user id
data_path += '/';
size_t user_len = data_path.length();
struct stat st;
for (auto &hide : hide_set) {
if (hide.first == ISOLATED_MAGIC) {
if (!first_iter) continue;
// Setup isolated processes
uid_proc_map[-1].emplace_back(hide.second);
}
data_path.resize(user_len);
data_path += hide.first;
if (stat(data_path.data(), &st))
continue;
uid_proc_map[st.st_uid].emplace_back(hide.second);
}
first_iter = false;
}
}
static bool is_zygote_done() {
#ifdef __LP64__
return zygote_map.size() >= 2;
@@ -132,7 +98,7 @@ static void check_zygote() {
static void setup_inotify() {
inotify_fd = xinotify_init1(IN_CLOEXEC);
if (inotify_fd < 0)
term_thread();
return;
// Setup inotify asynchronous I/O
fcntl(inotify_fd, F_SETFL, O_ASYNC);
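For orientation (my gloss, not part of the diff): O_ASYNC turns the inotify descriptor into a signal source, so the monitor thread is interrupted when a watched file changes instead of having to poll; the descriptor-ownership and signal-handler registration happen in code outside this hunk. The classic shape of that pattern, with the ownership and handler details as assumptions:

    // Assumed wiring; inotify_event(int) below has a signal-handler signature.
    fcntl(inotify_fd, F_SETFL, O_ASYNC);    // readable fd -> deliver a signal
    fcntl(inotify_fd, F_SETOWN, getpid());  // route the signal to this process (assumption)
    signal(SIGIO, inotify_event);           // handler drains the queued events (assumption)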
@@ -160,8 +126,8 @@
************************/
static void inotify_event(int) {
/* Make sure we can actually read stuffs
* or else the whole thread will be blocked.*/
// Make sure we can actually read stuffs
// or else the whole thread will be blocked.
struct pollfd pfd = {
.fd = inotify_fd,
.events = POLLIN,
@@ -180,13 +146,8 @@ static void inotify_event(int) {
// Workaround for the lack of pthread_cancel
static void term_thread(int) {
LOGD("proc_monitor: cleaning up\n");
uid_proc_map.clear();
zygote_map.clear();
hide_set.clear();
attaches.reset();
// Misc
set_hide_state(false);
pthread_mutex_destroy(&monitor_lock);
close(inotify_fd);
inotify_fd = -1;
LOGD("proc_monitor: terminate\n");
@@ -240,6 +201,9 @@ static bool check_pid(int pid) {
return false;
int uid = st.st_uid;
// Start accessing uid_proc_map
mutex_guard lock(hide_state_lock);
auto it = uid_proc_map.end();
if (uid % 100000 > 90000) {
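A note on the check above (my gloss, not from the commit): Android composes a uid as user_id * 100000 + app_id, and app ids above 90000 are used for isolated services and app-zygote processes, so these pids cannot be resolved through a package uid and instead fall back to the ISOLATED_MAGIC bucket, i.e. uid_proc_map[-1]. With made-up numbers:

    int uid     = 1099031;          // owner of /proc/<pid>, illustrative value
    int user_id = uid / 100000;     // 10    -> a secondary user
    int app_id  = uid % 100000;     // 99031 -> isolated range, so uid % 100000 > 90000 holds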

View File

@@ -121,15 +121,22 @@ int new_daemon_thread(thread_entry entry, void *arg) {
return xpthread_create(&thread, &attr, entry, arg);
}
static void *proxy_routine(void *fp) {
int new_daemon_thread(void(*entry)()) {
thread_entry proxy = [](void *entry) -> void * {
reinterpret_cast<void(*)()>(entry)();
return nullptr;
};
return new_daemon_thread(proxy, (void *) entry);
}
int new_daemon_thread(std::function<void()> &&entry) {
thread_entry proxy = [](void *fp) -> void * {
auto fn = reinterpret_cast<std::function<void()>*>(fp);
(*fn)();
delete fn;
return nullptr;
}
int new_daemon_thread(std::function<void()> &&entry) {
return new_daemon_thread(proxy_routine, new std::function<void()>(std::move(entry)));
};
return new_daemon_thread(proxy, new std::function<void()>(std::move(entry)));
}
static char *argv0;
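A hedged usage sketch of the two overloads above (only the &proc_monitor call site appears in this commit; the second call is purely illustrative): the new function-pointer overload passes its target straight through as the void* thread argument, while the std::function overload still heap-allocates a wrapper that its proxy deletes on the spawned thread.

    // Plain function pointer: no allocation; the proxy casts the void* back and calls it.
    new_daemon_thread(&proc_monitor);

    // Capturing callable: moved into a heap-allocated std::function that the
    // proxy invokes and then deletes on the new thread (illustrative call).
    std::string tag = "example";
    new_daemon_thread([tag] { LOGD("worker started: %s\n", tag.data()); });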

View File

@@ -60,6 +60,7 @@ static inline int parse_int(std::string_view s) { return parse_int(s.data()); }
using thread_entry = void *(*)(void *);
int new_daemon_thread(thread_entry entry, void *arg = nullptr);
int new_daemon_thread(void(*entry)());
int new_daemon_thread(std::function<void()> &&entry);
static inline bool str_contains(std::string_view s, std::string_view ss) {