Linux premium155.web-hosting.com 4.18.0-513.11.1.lve.el8.x86_64 #1 SMP Thu Jan 18 16:21:02 UTC 2024 x86_64
LiteSpeed
Server IP: 162.0.235.200 | Your IP: 18.190.219.178
Can't Read [ /etc/named.conf ]
PHP: 7.4.33
User: varifktc
www.github.com/MadExploits
Terminal
AUTO ROOT
Adminer
Backdoor Destroyer
Linux Exploit
Lock Shell
Lock File
Create User
CREATE RDP
PHP Mailer
BACKCONNECT
UNLOCK SHELL
HASH IDENTIFIER
CPANEL RESET
CREATE WP USER
README
/lib/tuned/cloudlinux-latency-performance/
[ HOME SHELL ]
Name         Size      Permission   Action
tuned.conf   1.92 KB   -rw-r--r--   Delete | Unzip | Zip
Code Editor : tuned.conf
#
# tuned configuration
#

[main]
summary=Optimized Cloudlinux hosting Servers
include=throughput-performance

[bootloader]
cmdline = systemd.unified_cgroup_hierarchy=0 systemd.legacy_systemd_cgroup_controller cgroup.memory=nokmem

[cpu]
governor=performance
energy_perf_bias=performance
min_perf_pct=100
# The alternation of CPU bound load and disk IO operations of postgresql
# db server suggest CPU to go into powersave mode.
#
# Explicitly disable deep c-states to reduce latency on OLTP workloads.
force_latency=1

[vm]
transparent_hugepages=never

[sysctl]
kernel.numa_balancing = 1
vm.dirty_ratio = 40
vm.dirty_background_ratio = 10
vm.swappiness=10
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_timestamps = 1
# Increase kernel buffer size maximums. Currently this seems only necessary at 40Gb speeds.
#
# The buffer tuning values below do not account for any potential hugepage allocation.
# Ensure that you do not oversubscribe system memory.
#net.ipv4.tcp_rmem="4096 87380 16777216"
#net.ipv4.tcp_wmem="4096 16384 16777216"
##
# Busy polling helps reduce latency in the network receive path
# by allowing socket layer code to poll the receive queue of a
# network device, and disabling network interrupts.
# busy_read value greater than 0 enables busy polling. Recommended
# net.core.busy_read value is 50.
# busy_poll value greater than 0 enables polling globally.
# Recommended net.core.busy_poll value is 50
net.core.busy_read=50
net.core.busy_poll=50
# TCP fast open reduces network latency by enabling data exchange
# during the sender's initial TCP SYN. The value 3 enables fast open
# on client and server connections.
net.ipv4.tcp_fastopen=3
####
vm.zone_reclaim_mode=0

[scheduler]
sched_min_granularity_ns = 10000000
sched_wakeup_granularity_ns = 15000000

[disk-vm]
type=disk
devices = vd*
elevator = mq-deadline

[disk-sas]
type=disk
devices = sd*
elevator = mq-deadline

[disk-nvme]
type=disk
devices = nvme*
elevator = none
readahead = 0
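The file above is a standard tuned profile in INI format: [main] inherits the stock throughput-performance profile, and the remaining sections override individual CPU, memory, sysctl, and disk-elevator settings. As a hedged sketch only (these commands are not part of the dump above and assume the tuned daemon is installed and running on the host), a profile stored under /lib/tuned/<name>/tuned.conf is normally selected and checked like this:

# Switch to the profile whose directory name matches the path shown above
tuned-adm profile cloudlinux-latency-performance

# Show the active profile and verify that the running system matches it
tuned-adm active
tuned-adm verify

# Spot-check one of the sysctl values the profile is expected to set
sysctl net.ipv4.tcp_fastopen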