#include <unistd.h>
#include <fcntl.h>
#include <pwd.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <sys/mount.h>

#include <consts.hpp>
#include <base.hpp>
#include <selinux.hpp>

#include "su.hpp"
#include "pts.hpp"

using namespace std;
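
// The most recent su_info lookup is cached below; an entry is reused while it is
// still fresh (see su_info::is_fresh) and all cache access is guarded by cache_lock.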
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static shared_ptr<su_info> cached;

su_info::su_info(int uid) :
    uid(uid), eval_uid(-1), cfg(DbSettings()), access(RootSettings()),
    mgr_uid(-1), timestamp(0), _lock(PTHREAD_MUTEX_INITIALIZER) {}

su_info::~su_info() {
    pthread_mutex_destroy(&_lock);
}

mutex_guard su_info::lock() {
    return mutex_guard(_lock);
}

bool su_info::is_fresh() {
    timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    long current = ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
    return current - timestamp < 3000; /* 3 seconds */
}

void su_info::refresh() {
    timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    timestamp = ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}
|
2017-04-16 02:28:12 +08:00
|
|
|
|
2022-02-06 06:45:58 -08:00
|
|
|
void su_info::check_db() {
|
|
|
|
eval_uid = uid;
|
2025-01-03 11:38:15 -08:00
|
|
|
MagiskD().get_db_settings(cfg);
|
2020-12-30 22:11:24 -08:00
|
|
|
|
|
|
|
// Check multiuser settings
|
2024-12-31 22:52:34 -08:00
|
|
|
switch (cfg.multiuser_mode) {
|
2025-01-03 11:38:15 -08:00
|
|
|
case MultiuserMode::OwnerOnly:
|
2022-02-06 06:45:58 -08:00
|
|
|
if (to_user_id(uid) != 0) {
|
|
|
|
eval_uid = -1;
|
2025-01-05 00:44:05 -08:00
|
|
|
access = SILENT_DENY;
|
2022-02-06 06:45:58 -08:00
|
|
|
}
|
|
|
|
break;
|
2025-01-03 11:38:15 -08:00
|
|
|
case MultiuserMode::OwnerManaged:
|
2022-02-06 06:45:58 -08:00
|
|
|
eval_uid = to_app_id(uid);
|
|
|
|
break;
|
2025-01-03 11:38:15 -08:00
|
|
|
case MultiuserMode::User:
|
2022-02-06 06:45:58 -08:00
|
|
|
default:
|
|
|
|
break;
|
2020-12-30 22:11:24 -08:00
|
|
|
}
|
|
|
|
|
2022-02-06 06:45:58 -08:00
|
|
|
if (eval_uid > 0) {
|
2025-01-05 00:44:05 -08:00
|
|
|
if (!MagiskD().get_root_settings(eval_uid, access))
|
2024-08-14 11:45:05 -07:00
|
|
|
return;
|
2022-01-17 19:54:33 -08:00
|
|
|
}
|
2020-12-30 22:11:24 -08:00
|
|
|
|
|
|
|
// We need to check our manager
|
2025-01-05 00:44:05 -08:00
|
|
|
if (access.policy == SuPolicy::Query || access.log || access.notify) {
|
2022-05-19 22:54:49 -07:00
|
|
|
mgr_uid = get_manager(to_user_id(eval_uid), &mgr_pkg, true);
|
2022-05-29 23:31:57 -07:00
|
|
|
}
|
2018-06-13 04:33:32 +08:00
|
|
|
}
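
// Determine whether a UID currently holds root access: consult the global root access
// and multiuser settings, then look for a non-expired Allow policy in the database.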
|
2017-05-29 18:54:33 +08:00
|
|
|
|
2022-01-17 19:54:33 -08:00
|
|
|
bool uid_granted_root(int uid) {
|
2022-05-30 02:09:07 -07:00
|
|
|
if (uid == AID_ROOT)
|
2022-01-17 19:54:33 -08:00
|
|
|
return true;
|
|
|
|
|
2025-01-03 11:38:15 -08:00
|
|
|
auto cfg = DbSettings();
|
|
|
|
MagiskD().get_db_settings(cfg);
|
2022-01-17 19:54:33 -08:00
|
|
|
|
|
|
|
// Check user root access settings
|
2024-12-31 22:52:34 -08:00
|
|
|
switch (cfg.root_access) {
|
2025-01-03 11:38:15 -08:00
|
|
|
case RootAccess::Disabled:
|
2022-01-17 19:54:33 -08:00
|
|
|
return false;
|
2025-01-03 11:38:15 -08:00
|
|
|
case RootAccess::AppsOnly:
|
2022-05-30 02:09:07 -07:00
|
|
|
if (uid == AID_SHELL)
|
2022-01-17 19:54:33 -08:00
|
|
|
return false;
|
|
|
|
break;
|
2025-01-03 11:38:15 -08:00
|
|
|
case RootAccess::AdbOnly:
|
2022-05-30 02:09:07 -07:00
|
|
|
if (uid != AID_SHELL)
|
2022-01-17 19:54:33 -08:00
|
|
|
return false;
|
|
|
|
break;
|
2025-01-03 11:38:15 -08:00
|
|
|
case RootAccess::AppsAndAdb:
|
2022-01-17 19:54:33 -08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check multiuser settings
|
2024-12-31 22:52:34 -08:00
|
|
|
switch (cfg.multiuser_mode) {
|
2025-01-03 11:38:15 -08:00
|
|
|
case MultiuserMode::OwnerOnly:
|
2022-01-17 19:54:33 -08:00
|
|
|
if (to_user_id(uid) != 0)
|
|
|
|
return false;
|
|
|
|
break;
|
2025-01-03 11:38:15 -08:00
|
|
|
case MultiuserMode::OwnerManaged:
|
2022-01-17 19:54:33 -08:00
|
|
|
uid = to_app_id(uid);
|
|
|
|
break;
|
2025-01-03 11:38:15 -08:00
|
|
|
case MultiuserMode::User:
|
2022-01-17 19:54:33 -08:00
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2025-01-02 01:04:44 -08:00
|
|
|
bool granted = false;
|
|
|
|
db_exec("SELECT policy FROM policies WHERE uid=? AND (until=0 OR until>?)",
|
|
|
|
{ uid, time(nullptr) },
|
2025-01-05 00:44:05 -08:00
|
|
|
[&](auto, const DbValues &values) { granted = values.get_int(0) == +SuPolicy::Allow; });
|
2025-01-02 01:04:44 -08:00
|
|
|
return granted;
|
2022-01-17 19:54:33 -08:00
|
|
|
}

struct policy_uid_list : public vector<int> {
    void operator()(StringSlice, const DbValues &values) {
        push_back(values.get_int(0));
    }
};
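
// Remove database policies whose owning app is no longer installed,
// and drop the cached su_info so the change takes effect immediately.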
void prune_su_access() {
    cached.reset();
    policy_uid_list uids;
    if (!db_exec("SELECT uid FROM policies", {}, uids))
        return;
    vector<bool> app_no_list = get_app_no_list();
    vector<int> rm_uids;
    for (int uid : uids) {
        int app_id = to_app_id(uid);
        if (app_id >= AID_APP_START && app_id <= AID_APP_END) {
            int app_no = app_id - AID_APP_START;
            if (app_no >= app_no_list.size() || !app_no_list[app_no]) {
                // The app_id is no longer installed
                rm_uids.push_back(uid);
            }
        }
    }
    for (int uid : rm_uids) {
        char query[256];
        ssprintf(query, sizeof(query), "DELETE FROM policies WHERE uid == %d", uid);
        db_exec(query);
    }
}
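
// Look up the su_info for a requesting UID. Requests from root bypass the database and
// manager checks entirely; everything else goes through the (time-limited) cache.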
static shared_ptr<su_info> get_su_info(unsigned uid) {
    if (uid == AID_ROOT) {
        auto info = make_shared<su_info>(uid);
        info->access = SILENT_ALLOW;
        return info;
    }

    shared_ptr<su_info> info;
    {
        mutex_guard lock(cache_lock);
        if (!cached || cached->uid != uid || !cached->is_fresh())
            cached = make_shared<su_info>(uid);
        cached->refresh();
        info = cached;
    }

    mutex_guard lock = info->lock();

    if (info->access.policy == SuPolicy::Query) {
        // Not cached, get data from database
        info->check_db();

        // If it's the manager, allow it silently
        if (to_app_id(info->uid) == to_app_id(info->mgr_uid)) {
            info->access = SILENT_ALLOW;
            return info;
        }

        // Check su access settings
        switch (info->cfg.root_access) {
            case RootAccess::Disabled:
                LOGW("Root access is disabled!\n");
                info->access = SILENT_DENY;
                break;
            case RootAccess::AdbOnly:
                if (info->uid != AID_SHELL) {
                    LOGW("Root access limited to ADB only!\n");
                    info->access = SILENT_DENY;
                }
                break;
            case RootAccess::AppsOnly:
                if (info->uid == AID_SHELL) {
                    LOGW("Root access is disabled for ADB!\n");
                    info->access = SILENT_DENY;
                }
                break;
            case RootAccess::AppsAndAdb:
            default:
                break;
        }

        if (info->access.policy != SuPolicy::Query)
            return info;

        // If still not determined, check if manager exists
        if (info->mgr_uid < 0) {
            info->access = SILENT_DENY;
            return info;
        }
    }
    return info;
}

// Set effective uid back to root, otherwise setres[ug]id will fail if uid isn't root
static void set_identity(uid_t uid, const std::vector<uid_t> &groups) {
    if (seteuid(0)) {
        PLOGE("seteuid (root)");
    }
    gid_t gid;
    if (groups.size() > 0) {
        if (setgroups(groups.size(), groups.data())) {
            PLOGE("setgroups");
        }
        gid = groups[0];
    } else {
        gid = uid;
    }
    if (setresgid(gid, gid, gid)) {
        PLOGE("setresgid (%u)", uid);
    }
    if (setresuid(uid, uid, uid)) {
        PLOGE("setresuid (%u)", uid);
    }
}
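
// Entry point for a single su request: read the request from the client socket,
// resolve the access policy (asking the manager app if needed), then fork a child
// that sets up stdio/PTY, namespaces, and environment before exec-ing the shell.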
void su_daemon_handler(int client, const sock_cred *cred) {
    LOGD("su: request from uid=[%d], pid=[%d], client=[%d]\n", cred->uid, cred->pid, client);

    su_context ctx = {
        .info = get_su_info(cred->uid),
        .req = su_request(),
        .pid = cred->pid
    };

    // Read su_request
    if (xxread(client, &ctx.req, sizeof(su_req_base)) < 0
        || !read_string(client, ctx.req.shell)
        || !read_string(client, ctx.req.command)
        || !read_string(client, ctx.req.context)
        || !read_vector(client, ctx.req.gids)) {
        LOGW("su: remote process probably died, abort\n");
        ctx.info.reset();
        write_int(client, +SuPolicy::Deny);
        close(client);
        return;
    }

    // If still not determined, ask manager
    if (ctx.info->access.policy == SuPolicy::Query) {
        int fd = app_request(ctx);
        if (fd < 0) {
            ctx.info->access.policy = SuPolicy::Deny;
        } else {
            int ret = read_int_be(fd);
            ctx.info->access.policy = ret < 0 ? SuPolicy::Deny : static_cast<SuPolicy>(ret);
            close(fd);
        }
    }

    if (ctx.info->access.log)
        app_log(ctx);
    else if (ctx.info->access.notify)
        app_notify(ctx);

    // Fail fast
    if (ctx.info->access.policy == SuPolicy::Deny) {
        LOGW("su: request rejected (%u)\n", ctx.info->uid);
        ctx.info.reset();
        write_int(client, +SuPolicy::Deny);
        close(client);
        return;
    }

    // Fork a child root process
    //
    // The child process will need to setsid, open a pseudo-terminal
    // if needed, and eventually exec the shell.
    // The parent process will wait for the result and
    // send the return code back to our client.

    if (int child = xfork(); child) {
        ctx.info.reset();

        // Wait for the child's result
        LOGD("su: waiting child pid=[%d]\n", child);
        int status, code;

        if (waitpid(child, &status, 0) > 0)
            code = WEXITSTATUS(status);
        else
            code = -1;

        LOGD("su: return code=[%d]\n", code);
        write(client, &code, sizeof(code));
        close(client);
        return;
    }

    LOGD("su: fork handler\n");

    // Abort upon any error
    exit_on_error(true);

    // ack
    write_int(client, 0);

    // Become session leader
    xsetsid();

    // The FDs for each of the streams
    int infd = recv_fd(client);
    int outfd = recv_fd(client);
    int errfd = recv_fd(client);
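    // Any stream fd that is negative falls back to the pty slave opened below when a PTY is requested.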

    // App needs a PTY
    if (read_int(client)) {
        string pts;
        string ptmx;
        auto magiskpts = get_magisk_tmp() + "/"s SHELLPTS;
        if (access(magiskpts.data(), F_OK)) {
            pts = "/dev/pts";
            ptmx = "/dev/ptmx";
        } else {
            pts = magiskpts;
            ptmx = magiskpts + "/ptmx";
        }
        int ptmx_fd = xopen(ptmx.data(), O_RDWR);
        grantpt(ptmx_fd);
        unlockpt(ptmx_fd);
        int pty_num = get_pty_num(ptmx_fd);
        if (pty_num < 0) {
            // Kernel issue? Fallback to /dev/pts
            close(ptmx_fd);
            pts = "/dev/pts";
            ptmx_fd = xopen("/dev/ptmx", O_RDWR);
            grantpt(ptmx_fd);
            unlockpt(ptmx_fd);
            pty_num = get_pty_num(ptmx_fd);
        }
        send_fd(client, ptmx_fd);
        close(ptmx_fd);

        string pts_slave = pts + "/" + to_string(pty_num);
        LOGD("su: pts_slave=[%s]\n", pts_slave.data());

        // Opening the TTY has to occur after the
        // fork() and setsid() so that it becomes
        // our controlling TTY and not the daemon's
        int ptsfd = xopen(pts_slave.data(), O_RDWR);

        if (infd < 0)
            infd = ptsfd;
        if (outfd < 0)
            outfd = ptsfd;
        if (errfd < 0)
            errfd = ptsfd;
    }

    // Swap out stdin, stdout, stderr
    xdup2(infd, STDIN_FILENO);
    xdup2(outfd, STDOUT_FILENO);
    xdup2(errfd, STDERR_FILENO);

    close(infd);
    close(outfd);
    close(errfd);
    close(client);

    // Handle namespaces
    if (ctx.req.target == -1)
        ctx.req.target = ctx.pid;
    else if (ctx.req.target == 0)
        ctx.info->cfg.mnt_ns = MntNsMode::Global;
    else if (ctx.info->cfg.mnt_ns == MntNsMode::Global)
        ctx.info->cfg.mnt_ns = MntNsMode::Requester;
    switch (ctx.info->cfg.mnt_ns) {
        case MntNsMode::Global:
            LOGD("su: use global namespace\n");
            break;
        case MntNsMode::Requester:
            LOGD("su: use namespace of pid=[%d]\n", ctx.req.target);
            if (switch_mnt_ns(ctx.req.target))
                LOGD("su: setns failed, fallback to global\n");
            break;
        case MntNsMode::Isolate:
            LOGD("su: use new isolated namespace\n");
            switch_mnt_ns(ctx.req.target);
            xunshare(CLONE_NEWNS);
            xmount(nullptr, "/", nullptr, MS_PRIVATE | MS_REC, nullptr);
            break;
    }

    const char *argv[4] = { nullptr };

    argv[0] = ctx.req.login ? "-" : ctx.req.shell.data();

    if (!ctx.req.command.empty()) {
        argv[1] = "-c";
        argv[2] = ctx.req.command.data();
    }

    // Setup environment
    umask(022);
    char path[32];
    ssprintf(path, sizeof(path), "/proc/%d/cwd", ctx.pid);
    char cwd[4096];
    if (realpath(path, cwd, sizeof(cwd)) > 0)
        chdir(cwd);
    ssprintf(path, sizeof(path), "/proc/%d/environ", ctx.pid);
    auto env = full_read(path);
    clearenv();
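    // /proc/<pid>/environ is a block of NUL-terminated "KEY=value" entries;
    // walk it and putenv each entry in place.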
    for (size_t pos = 0; pos < env.size(); ++pos) {
        putenv(env.data() + pos);
        pos = env.find_first_of('\0', pos);
        if (pos == std::string::npos)
            break;
    }
    if (!ctx.req.keepenv) {
        struct passwd *pw;
        pw = getpwuid(ctx.req.uid);
        if (pw) {
            setenv("HOME", pw->pw_dir, 1);
            setenv("USER", pw->pw_name, 1);
            setenv("LOGNAME", pw->pw_name, 1);
            setenv("SHELL", ctx.req.shell.data(), 1);
        }
    }

    // Unblock all signals
    sigset_t block_set;
    sigemptyset(&block_set);
    sigprocmask(SIG_SETMASK, &block_set, nullptr);
    if (!ctx.req.context.empty()) {
        auto f = xopen_file("/proc/self/attr/exec", "we");
        if (f) fprintf(f.get(), "%s", ctx.req.context.data());
    }
    set_identity(ctx.req.uid, ctx.req.gids);
    execvp(ctx.req.shell.data(), (char **) argv);
    fprintf(stderr, "Cannot execute %s: %s\n", ctx.req.shell.data(), strerror(errno));
    PLOGE("exec");
}