2017-04-15 03:21:31 +08:00
|
|
|
#include <unistd.h>
|
|
|
|
#include <fcntl.h>
|
2018-10-04 04:59:51 -04:00
|
|
|
#include <pwd.h>
|
2017-04-15 03:21:31 +08:00
|
|
|
#include <sys/socket.h>
|
|
|
|
#include <sys/wait.h>
|
2019-06-04 21:21:27 -07:00
|
|
|
#include <sys/mount.h>
|
2017-04-15 03:21:31 +08:00
|
|
|
|
2021-07-22 23:35:14 +08:00
|
|
|
#include <magisk.hpp>
|
2022-05-12 02:03:42 -07:00
|
|
|
#include <base.hpp>
|
2020-03-09 01:50:30 -07:00
|
|
|
#include <selinux.hpp>
|
2019-02-10 03:57:51 -05:00
|
|
|
|
2020-03-09 01:50:30 -07:00
|
|
|
#include "su.hpp"
|
|
|
|
#include "pts.hpp"
|
2017-04-15 03:21:31 +08:00
|
|
|
|
2019-07-07 00:31:49 -07:00
|
|
|
using namespace std;
|
|
|
|
|
2018-10-03 23:31:15 -04:00
|
|
|
// Protects the single-entry su_info cache below.
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
// Most recently used su_info; reused by get_su_info while still fresh.
static shared_ptr<su_info> cached;
|
2018-11-04 03:38:06 -05:00
|
|
|
|
2022-02-06 06:45:58 -08:00
|
|
|
// Construct a su_info entry for the given requester uid.
// eval_uid and mgr_uid start at -1 (not yet resolved), access starts at
// DEFAULT_SU_ACCESS (consulted/overwritten by check_db on first use), and
// timestamp 0 marks the entry as stale until refresh() is called.
su_info::su_info(int uid) :
    uid(uid), eval_uid(-1), access(DEFAULT_SU_ACCESS), mgr_uid(-1),
    timestamp(0), _lock(PTHREAD_MUTEX_INITIALIZER) {}
|
2018-11-04 03:38:06 -05:00
|
|
|
|
|
|
|
// Release the per-entry mutex when the last shared_ptr owner drops the entry.
su_info::~su_info() {
    pthread_mutex_destroy(&_lock);
}
|
|
|
|
|
2019-09-26 01:49:50 -04:00
|
|
|
// Acquire this entry's mutex; the returned guard releases it on scope exit (RAII).
mutex_guard su_info::lock() {
    return mutex_guard(_lock);
}
|
2017-05-29 18:54:33 +08:00
|
|
|
|
2019-07-07 00:31:49 -07:00
|
|
|
bool su_info::is_fresh() {
|
2020-12-30 22:11:24 -08:00
|
|
|
timespec ts;
|
|
|
|
clock_gettime(CLOCK_MONOTONIC, &ts);
|
|
|
|
long current = ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
|
|
|
|
return current - timestamp < 3000; /* 3 seconds */
|
2018-12-26 11:56:49 +08:00
|
|
|
}
|
|
|
|
|
2019-07-07 00:31:49 -07:00
|
|
|
// Stamp this entry with the current monotonic time in milliseconds,
// restarting the 3 second freshness window checked by is_fresh().
void su_info::refresh() {
    timespec now{};
    clock_gettime(CLOCK_MONOTONIC, &now);
    timestamp = now.tv_sec * 1000L + now.tv_nsec / 1000000L;
}
|
2017-04-16 02:28:12 +08:00
|
|
|
|
2022-02-06 06:45:58 -08:00
|
|
|
// Resolve this uid's su access policy from the database.
// Fills in cfg, eval_uid (the uid actually used for the policy lookup,
// possibly remapped by multiuser mode), access, and — when logging or
// notification is enabled — the manager's uid/package (mgr_uid/mgr_pkg).
void su_info::check_db() {
    eval_uid = uid;
    get_db_settings(cfg);

    // Check multiuser settings
    switch (cfg[SU_MULTIUSER_MODE]) {
    case MULTIUSER_MODE_OWNER_ONLY:
        // Only the owner user (user id 0) may have root at all
        if (to_user_id(uid) != 0) {
            eval_uid = -1;
            access = NO_SU_ACCESS;
        }
        break;
    case MULTIUSER_MODE_OWNER_MANAGED:
        // Look up the policy under the owner's copy of the app (app id only)
        eval_uid = to_app_id(uid);
        break;
    case MULTIUSER_MODE_USER:
    default:
        break;
    }

    if (eval_uid > 0) {
        char query[256], *err;
        // Only consider policies that have not expired (until=0 means permanent)
        ssprintf(query, sizeof(query),
            "SELECT policy, logging, notification FROM policies "
            "WHERE uid=%d AND (until=0 OR until>%li)", eval_uid, time(nullptr));
        err = db_exec(query, [&](db_row &row) -> bool {
            access.policy = (policy_t) parse_int(row["policy"]);
            access.log = parse_int(row["logging"]);
            access.notify = parse_int(row["notification"]);
            LOGD("magiskdb: query policy=[%d] log=[%d] notify=[%d]\n",
                access.policy, access.log, access.notify);
            return true;
        });
        // Project macro: on DB error, log and execute the given statement (return)
        db_err_cmd(err, return);
    }

    // We need to check our manager
    if (access.log || access.notify) {
        check_pkg_refresh();
        mgr_uid = get_manager(to_user_id(eval_uid), &mgr_pkg, true);
    }
}
|
2017-05-29 18:54:33 +08:00
|
|
|
|
2022-01-17 19:54:33 -08:00
|
|
|
// Whether the given uid currently holds an (unexpired) ALLOW policy,
// taking the global root-access and multiuser settings into account.
// Root itself is always granted. Returns false on any database error.
bool uid_granted_root(int uid) {
    if (uid == AID_ROOT)
        return true;

    db_settings cfg;
    get_db_settings(cfg);

    // Check user root access settings
    switch (cfg[ROOT_ACCESS]) {
    case ROOT_ACCESS_DISABLED:
        return false;
    case ROOT_ACCESS_APPS_ONLY:
        // ADB shell is excluded in apps-only mode
        if (uid == AID_SHELL)
            return false;
        break;
    case ROOT_ACCESS_ADB_ONLY:
        // Everything except ADB shell is excluded in adb-only mode
        if (uid != AID_SHELL)
            return false;
        break;
    case ROOT_ACCESS_APPS_AND_ADB:
        break;
    }

    // Check multiuser settings
    switch (cfg[SU_MULTIUSER_MODE]) {
    case MULTIUSER_MODE_OWNER_ONLY:
        if (to_user_id(uid) != 0)
            return false;
        break;
    case MULTIUSER_MODE_OWNER_MANAGED:
        // Policies are stored under the owner's app id in this mode
        uid = to_app_id(uid);
        break;
    case MULTIUSER_MODE_USER:
    default:
        break;
    }

    bool granted = false;

    char query[256], *err;
    // Only consider policies that have not expired (until=0 means permanent)
    ssprintf(query, sizeof(query),
        "SELECT policy FROM policies WHERE uid=%d AND (until=0 OR until>%li)",
        uid, time(nullptr));
    err = db_exec(query, [&](db_row &row) -> bool {
        granted = parse_int(row["policy"]) == ALLOW;
        return true;
    });
    // On DB error, deny (fail closed)
    db_err_cmd(err, return false);

    return granted;
}
|
|
|
|
|
2022-05-28 22:39:44 -07:00
|
|
|
// Remove policies whose app is no longer installed, and drop the in-memory
// su_info cache so the next request re-reads the database.
void prune_su_access() {
    cached.reset();
    // get_app_no_list(): bitmap of which app numbers currently exist — TODO confirm against caller
    vector<bool> app_no_list = get_app_no_list();
    vector<int> rm_uids;
    char query[256], *err;
    strscpy(query, "SELECT uid FROM policies", sizeof(query));
    err = db_exec(query, [&](db_row &row) -> bool {
        int uid = parse_int(row["uid"]);
        int app_id = to_app_id(uid);
        // Only app uids are candidates for pruning; system uids are kept
        if (app_id >= AID_APP_START && app_id <= AID_APP_END) {
            int app_no = app_id - AID_APP_START;
            if (app_no >= app_no_list.size() || !app_no_list[app_no]) {
                // The app_id is no longer installed
                rm_uids.push_back(uid);
            }
        }
        return true;
    });
    db_err_cmd(err, return);

    // Delete outside the query callback to avoid mutating while iterating
    for (int uid : rm_uids) {
        ssprintf(query, sizeof(query), "DELETE FROM policies WHERE uid == %d", uid);
        // Don't care about errors
        db_exec(query);
    }
}
|
|
|
|
|
2019-09-13 14:05:28 -04:00
|
|
|
// Return the su_info entry for the requesting uid, consulting the database
// and global settings when the cached policy is still undecided (QUERY).
// Root requests bypass the database/manager check entirely.
static shared_ptr<su_info> get_su_info(unsigned uid) {
    LOGD("su: request from uid=[%d]\n", uid);

    // Requests from root are always silently allowed; no DB lookup needed
    if (uid == AID_ROOT) {
        auto info = make_shared<su_info>(uid);
        info->access = SILENT_SU_ACCESS;
        return info;
    }

    shared_ptr<su_info> info;
    {
        // Reuse the cached entry only if it belongs to this uid and is fresh
        mutex_guard lock(cache_lock);
        if (!cached || cached->uid != uid || !cached->is_fresh())
            cached = make_shared<su_info>(uid);
        cached->refresh();
        info = cached;
    }

    // Per-entry lock: serialize policy resolution for this uid
    mutex_guard lock = info->lock();

    if (info->access.policy == QUERY) {
        // Not cached, get data from database
        info->check_db();

        // If it's the manager, allow it silently
        if (to_app_id(info->uid) == to_app_id(info->mgr_uid)) {
            info->access = SILENT_SU_ACCESS;
            return info;
        }

        // Check su access settings
        switch (info->cfg[ROOT_ACCESS]) {
        case ROOT_ACCESS_DISABLED:
            LOGW("Root access is disabled!\n");
            info->access = NO_SU_ACCESS;
            break;
        case ROOT_ACCESS_ADB_ONLY:
            if (info->uid != AID_SHELL) {
                LOGW("Root access limited to ADB only!\n");
                info->access = NO_SU_ACCESS;
            }
            break;
        case ROOT_ACCESS_APPS_ONLY:
            if (info->uid == AID_SHELL) {
                LOGW("Root access is disabled for ADB!\n");
                info->access = NO_SU_ACCESS;
            }
            break;
        case ROOT_ACCESS_APPS_AND_ADB:
        default:
            break;
        }

        // A concrete decision (ALLOW/DENY/...) was reached above
        if (info->access.policy != QUERY)
            return info;

        // If still not determined, check if manager exists
        if (info->mgr_uid < 0) {
            // No manager to prompt the user — deny
            info->access = NO_SU_ACCESS;
            return info;
        }
    }
    // Policy may still be QUERY here; the caller asks the manager app
    return info;
}
|
|
|
|
|
2020-09-10 00:38:29 -07:00
|
|
|
// Set effective uid back to root, otherwise setres[ug]id will fail if uid isn't root
|
2023-05-16 19:26:44 +08:00
|
|
|
static void set_identity(uid_t uid, const std::vector<uid_t> &groups) {
|
2020-12-30 22:11:24 -08:00
|
|
|
if (seteuid(0)) {
|
|
|
|
PLOGE("seteuid (root)");
|
|
|
|
}
|
2023-05-16 19:26:44 +08:00
|
|
|
gid_t gid;
|
|
|
|
if (groups.size() > 0) {
|
|
|
|
if (setgroups(groups.size(), groups.data())) {
|
|
|
|
PLOGE("setgroups");
|
|
|
|
}
|
|
|
|
gid = groups[0];
|
|
|
|
} else {
|
|
|
|
gid = uid;
|
|
|
|
}
|
|
|
|
if (setresgid(gid, gid, gid)) {
|
2020-12-30 22:11:24 -08:00
|
|
|
PLOGE("setresgid (%u)", uid);
|
|
|
|
}
|
|
|
|
if (setresuid(uid, uid, uid)) {
|
|
|
|
PLOGE("setresuid (%u)", uid);
|
|
|
|
}
|
2018-10-04 04:59:51 -04:00
|
|
|
}
|
|
|
|
|
2021-10-19 23:46:38 -07:00
|
|
|
// Handle one su request from a connected client socket.
// Resolves the access policy (asking the manager app if undecided), logs or
// notifies as configured, then forks: the parent relays the child's exit code
// back to the client, while the child sets up stdio (optionally over a PTY),
// mount namespace, environment, and identity before exec-ing the shell.
void su_daemon_handler(int client, const sock_cred *cred) {
    LOGD("su: request from pid=[%d], client=[%d]\n", cred->pid, client);

    su_context ctx = {
        .info = get_su_info(cred->uid),
        .req = su_request(),
        .pid = cred->pid
    };

    // Read su_request
    if (xxread(client, &ctx.req, sizeof(su_req_base)) < 0
        || !read_string(client, ctx.req.shell)
        || !read_string(client, ctx.req.command)
        || !read_vector(client, ctx.req.gids)) {
        LOGW("su: remote process probably died, abort\n");
        // Drop the shared cache reference before bailing out
        ctx.info.reset();
        write_int(client, DENY);
        close(client);
        return;
    }

    // If still not determined, ask manager
    if (ctx.info->access.policy == QUERY) {
        int fd = app_request(ctx);
        if (fd < 0) {
            // Manager unreachable — deny
            ctx.info->access.policy = DENY;
        } else {
            // Manager replies with the user's decision as a big-endian int
            int ret = read_int_be(fd);
            ctx.info->access.policy = ret < 0 ? DENY : static_cast<policy_t>(ret);
            close(fd);
        }
    }

    if (ctx.info->access.log)
        app_log(ctx);
    else if (ctx.info->access.notify)
        app_notify(ctx);

    // Fail fast
    if (ctx.info->access.policy == DENY) {
        LOGW("su: request rejected (%u)\n", ctx.info->uid);
        ctx.info.reset();
        write_int(client, DENY);
        close(client);
        return;
    }

    // Fork a child root process
    //
    // The child process will need to setsid, open a pseudo-terminal
    // if needed, and eventually exec shell.
    // The parent process will wait for the result and
    // send the return code back to our client.

    if (int child = xfork(); child) {
        ctx.info.reset();

        // Wait result
        LOGD("su: waiting child pid=[%d]\n", child);
        int status, code;

        if (waitpid(child, &status, 0) > 0)
            code = WEXITSTATUS(status);
        else
            code = -1;

        LOGD("su: return code=[%d]\n", code);
        write(client, &code, sizeof(code));
        close(client);
        return;
    }

    // ---- Child process from here on ----

    LOGD("su: fork handler\n");

    // Abort upon any error occurred
    exit_on_error(true);

    // ack
    write_int(client, 0);

    // Become session leader
    xsetsid();

    // The FDs for each of the streams
    int infd = recv_fd(client);
    int outfd = recv_fd(client);
    int errfd = recv_fd(client);

    // App need a PTY
    if (read_int(client)) {
        string pts;
        string ptmx;
        // Prefer Magisk's private devpts mount when it exists
        auto magiskpts = MAGISKTMP + "/" SHELLPTS;
        if (access(magiskpts.data(), F_OK)) {
            pts = "/dev/pts";
            ptmx = "/dev/ptmx";
        } else {
            pts = magiskpts;
            ptmx = magiskpts + "/ptmx";
        }
        int ptmx_fd = xopen(ptmx.data(), O_RDWR);
        grantpt(ptmx_fd);
        unlockpt(ptmx_fd);
        int pty_num = get_pty_num(ptmx_fd);
        if (pty_num < 0) {
            // Kernel issue? Fallback to /dev/pts
            close(ptmx_fd);
            pts = "/dev/pts";
            ptmx_fd = xopen("/dev/ptmx", O_RDWR);
            grantpt(ptmx_fd);
            unlockpt(ptmx_fd);
            pty_num = get_pty_num(ptmx_fd);
        }
        // Hand the master side back to the client for terminal forwarding
        send_fd(client, ptmx_fd);
        close(ptmx_fd);

        string pts_slave = pts + "/" + to_string(pty_num);
        LOGD("su: pts_slave=[%s]\n", pts_slave.data());

        // Opening the TTY has to occur after the
        // fork() and setsid() so that it becomes
        // our controlling TTY and not the daemon's
        int ptsfd = xopen(pts_slave.data(), O_RDWR);

        // Only redirect streams the client did not supply explicitly
        if (infd < 0)
            infd = ptsfd;
        if (outfd < 0)
            outfd = ptsfd;
        if (errfd < 0)
            errfd = ptsfd;
    }

    // Swap out stdin, stdout, stderr
    xdup2(infd, STDIN_FILENO);
    xdup2(outfd, STDOUT_FILENO);
    xdup2(errfd, STDERR_FILENO);

    close(infd);
    close(outfd);
    close(errfd);
    close(client);

    // Handle namespaces
    if (ctx.req.mount_master)
        ctx.info->cfg[SU_MNT_NS] = NAMESPACE_MODE_GLOBAL;
    switch (ctx.info->cfg[SU_MNT_NS]) {
    case NAMESPACE_MODE_GLOBAL:
        LOGD("su: use global namespace\n");
        break;
    case NAMESPACE_MODE_REQUESTER:
        LOGD("su: use namespace of pid=[%d]\n", ctx.pid);
        if (switch_mnt_ns(ctx.pid))
            LOGD("su: setns failed, fallback to global\n");
        break;
    case NAMESPACE_MODE_ISOLATE:
        LOGD("su: use new isolated namespace\n");
        switch_mnt_ns(ctx.pid);
        xunshare(CLONE_NEWNS);
        // Make all mounts private so changes stay inside this namespace
        xmount(nullptr, "/", nullptr, MS_PRIVATE | MS_REC, nullptr);
        break;
    }

    // Build argv for the shell: [shell|-], optionally "-c" <command>
    const char *argv[4] = { nullptr };

    argv[0] = ctx.req.login ? "-" : ctx.req.shell.data();

    if (!ctx.req.command.empty()) {
        argv[1] = "-c";
        argv[2] = ctx.req.command.data();
    }

    // Setup environment
    umask(022);
    char path[32];
    // Inherit the requester's working directory when resolvable
    ssprintf(path, sizeof(path), "/proc/%d/cwd", ctx.pid);
    char cwd[4096];
    if (realpath(path, cwd, sizeof(cwd)) > 0)
        chdir(cwd);
    // Replace our environment with the requester's (/proc/<pid>/environ is
    // a NUL-separated list of KEY=VALUE entries)
    ssprintf(path, sizeof(path), "/proc/%d/environ", ctx.pid);
    auto env = full_read(path);
    clearenv();
    for (size_t pos = 0; pos < env.size(); ++pos) {
        putenv(env.data() + pos);
        pos = env.find_first_of('\0', pos);
        if (pos == std::string::npos)
            break;
    }
    if (!ctx.req.keepenv) {
        // Reset the identity-related variables to the target user's
        struct passwd *pw;
        pw = getpwuid(ctx.req.uid);
        if (pw) {
            setenv("HOME", pw->pw_dir, 1);
            setenv("USER", pw->pw_name, 1);
            setenv("LOGNAME", pw->pw_name, 1);
            setenv("SHELL", ctx.req.shell.data(), 1);
        }
    }

    // Unblock all signals
    sigset_t block_set;
    sigemptyset(&block_set);
    sigprocmask(SIG_SETMASK, &block_set, nullptr);
    // Drop to the requested uid/gids last, just before exec
    set_identity(ctx.req.uid, ctx.req.gids);
    execvp(ctx.req.shell.data(), (char **) argv);
    // Only reached if exec failed
    fprintf(stderr, "Cannot execute %s: %s\n", ctx.req.shell.data(), strerror(errno));
    PLOGE("exec");
}
|