xref: /xnu-8796.121.2/osfmk/vm/analytics.c (revision c54f35ca767986246321eb901baf8f5ff7923f6a)
1 /*
2  * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 /*
30  * Telemetry from the VM is usually collected at a daily cadence.
31  * All of those events are in this file along with a single thread
32  * call for reporting them.
33  *
34  * NB: The freezer subsystem has its own telemetry based on its budget interval
35  * so it's not included here.
36  */
37 
38 #include <kern/thread_call.h>
39 #include <libkern/coreanalytics/coreanalytics.h>
40 #include <os/log.h>
41 #include <vm/vm_page.h>
42 
43 #include "vm_compressor_backing_store.h"
44 
45 void vm_analytics_tick(void *arg0, void *arg1);
46 
47 #define ANALYTICS_PERIOD_HOURS (24ULL)
48 
49 static thread_call_t vm_analytics_thread_call;
50 
/*
 * Daily swap-usage event: peak allocated and peak used swap segment
 * counts since the previous report (values come from
 * vm_swap_reset_max_segs_tracking(), which also resets the tracking).
 */
CA_EVENT(vm_swapusage,
    CA_INT, max_alloced,
    CA_INT, max_used,
    CA_INT, trial_deployment_id,
    CA_STATIC_STRING(CA_UUID_LEN), trial_treatment_id,
    CA_STATIC_STRING(CA_UUID_LEN), trial_experiment_id);
57 
/*
 * Daily wiring-failure event: how many vm_map_wire attempts failed
 * because they would exceed the global or the per-user wired-memory
 * limit (counters vm_add_wire_count_over_{global,user}_limit, reset
 * after each report).
 */
CA_EVENT(mlock_failures,
    CA_INT, over_global_limit,
    CA_INT, over_user_limit,
    CA_INT, trial_deployment_id,
    CA_STATIC_STRING(CA_UUID_LEN), trial_treatment_id,
    CA_STATIC_STRING(CA_UUID_LEN), trial_experiment_id);
64 
65 /*
66  * NB: It's a good practice to include these trial
67  * identifiers in all of our events so that we can
68  * measure the impact of any A/B tests on these metrics.
69  */
70 extern uuid_string_t trial_treatment_id;
71 extern uuid_string_t trial_experiment_id;
72 extern int trial_deployment_id;
73 
74 static void
add_trial_uuids(char * treatment_id,char * experiment_id)75 add_trial_uuids(char *treatment_id, char *experiment_id)
76 {
77 	strlcpy(treatment_id, trial_treatment_id, CA_UUID_LEN);
78 	strlcpy(experiment_id, trial_experiment_id, CA_UUID_LEN);
79 }
80 
81 static void
report_vm_swapusage()82 report_vm_swapusage()
83 {
84 	uint64_t max_alloced, max_used;
85 	ca_event_t event = CA_EVENT_ALLOCATE(vm_swapusage);
86 	CA_EVENT_TYPE(vm_swapusage) * e = event->data;
87 
88 	vm_swap_reset_max_segs_tracking(&max_alloced, &max_used);
89 	e->max_alloced = max_alloced;
90 	e->max_used = max_used;
91 	add_trial_uuids(e->trial_treatment_id, e->trial_experiment_id);
92 	e->trial_deployment_id = trial_deployment_id;
93 	CA_EVENT_SEND(event);
94 }
95 
96 static void
report_mlock_failures()97 report_mlock_failures()
98 {
99 	ca_event_t event = CA_EVENT_ALLOCATE(mlock_failures);
100 	CA_EVENT_TYPE(mlock_failures) * e = event->data;
101 
102 	e->over_global_limit = os_atomic_load_wide(&vm_add_wire_count_over_global_limit, relaxed);
103 	e->over_user_limit = os_atomic_load_wide(&vm_add_wire_count_over_user_limit, relaxed);
104 
105 	os_atomic_store_wide(&vm_add_wire_count_over_global_limit, 0, relaxed);
106 	os_atomic_store_wide(&vm_add_wire_count_over_user_limit, 0, relaxed);
107 
108 	add_trial_uuids(e->trial_treatment_id, e->trial_experiment_id);
109 	e->trial_deployment_id = trial_deployment_id;
110 	CA_EVENT_SEND(event);
111 }
112 
#if XNU_TARGET_OS_WATCH
/*
 * watchOS-only event: histogram of compressor segment ages, bucketed
 * as [0,1), [1,6), [6,12), [12,24), [24,36), [36,48) and [48,max)
 * hours (populated by report_compressor_age()).
 */
CA_EVENT(compressor_age,
    CA_INT, hour1,
    CA_INT, hour6,
    CA_INT, hour12,
    CA_INT, hour24,
    CA_INT, hour36,
    CA_INT, hour48,
    CA_INT, hourMax,
    CA_INT, trial_deployment_id,
    CA_STATIC_STRING(CA_UUID_LEN), trial_treatment_id,
    CA_STATIC_STRING(CA_UUID_LEN), trial_experiment_id);
125 
126 /**
127  * Compressor age bucket descriptor.
128  */
129 typedef struct {
130 	/* Number of segments in this bucket. */
131 	uint64_t count;
132 	/* The bucket's lower bound (inclusive) */
133 	uint64_t lower;
134 	/* The bucket's upper bound (exclusive) */
135 	uint64_t upper;
136 } c_reporting_bucket_t;
137 #define C_REPORTING_BUCKETS_MAX (UINT64_MAX)
138 #ifndef ARRAY_SIZE
139 #define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
140 #endif
141 #define HR_TO_S(x) ((x) * 60 * 60)
142 
143 /**
144  * Report the age of segments in the compressor.
145  */
146 static void
report_compressor_age()147 report_compressor_age()
148 {
149 	/* If the compressor is not configured, do nothing and return early. */
150 	if (vm_compressor_mode == VM_PAGER_NOT_CONFIGURED) {
151 		os_log(OS_LOG_DEFAULT, "%s: vm_compressor_mode == VM_PAGER_NOT_CONFIGURED, returning early", __func__);
152 		return;
153 	}
154 
155 	const queue_head_t *c_queues[] = {&c_age_list_head, &c_major_list_head};
156 	c_reporting_bucket_t c_buckets[] = {
157 		{.count = 0, .lower = HR_TO_S(0), .upper = HR_TO_S(1)},  /* [0, 1) hours */
158 		{.count = 0, .lower = HR_TO_S(1), .upper = HR_TO_S(6)},  /* [1, 6) hours */
159 		{.count = 0, .lower = HR_TO_S(6), .upper = HR_TO_S(12)},  /* [6, 12) hours */
160 		{.count = 0, .lower = HR_TO_S(12), .upper = HR_TO_S(24)}, /* [12, 24) hours */
161 		{.count = 0, .lower = HR_TO_S(24), .upper = HR_TO_S(36)}, /* [24, 36) hours */
162 		{.count = 0, .lower = HR_TO_S(36), .upper = HR_TO_S(48)}, /* [36, 48) hours */
163 		{.count = 0, .lower = HR_TO_S(48), .upper = C_REPORTING_BUCKETS_MAX}, /* [48, MAX) hours */
164 	};
165 	clock_sec_t now;
166 	clock_nsec_t nsec;
167 
168 	/* Collect the segments and update the bucket counts. */
169 	lck_mtx_lock_spin_always(c_list_lock);
170 	for (unsigned q = 0; q < ARRAY_SIZE(c_queues); q++) {
171 		c_segment_t c_seg = (c_segment_t) queue_first(c_queues[q]);
172 		while (!queue_end(c_queues[q], (queue_entry_t) c_seg)) {
173 			for (unsigned b = 0; b < ARRAY_SIZE(c_buckets); b++) {
174 				uint32_t creation_ts = c_seg->c_creation_ts;
175 				clock_get_system_nanotime(&now, &nsec);
176 				clock_sec_t age = now - creation_ts;
177 				if ((age >= c_buckets[b].lower) &&
178 				    (age < c_buckets[b].upper)) {
179 					c_buckets[b].count++;
180 					break;
181 				}
182 			}
183 			c_seg = (c_segment_t) queue_next(&c_seg->c_age_list);
184 		}
185 	}
186 	lck_mtx_unlock_always(c_list_lock);
187 
188 	/* Send the ages to CoreAnalytics. */
189 	ca_event_t event = CA_EVENT_ALLOCATE(compressor_age);
190 	CA_EVENT_TYPE(compressor_age) * e = event->data;
191 	e->hour1 = c_buckets[0].count;
192 	e->hour6 = c_buckets[1].count;
193 	e->hour12 = c_buckets[2].count;
194 	e->hour24 = c_buckets[3].count;
195 	e->hour36 = c_buckets[4].count;
196 	e->hour48 = c_buckets[5].count;
197 	e->hourMax = c_buckets[6].count;
198 	add_trial_uuids(e->trial_treatment_id, e->trial_experiment_id);
199 	e->trial_deployment_id = trial_deployment_id;
200 	CA_EVENT_SEND(event);
201 }
202 #endif /* XNU_TARGET_OS_WATCH */
203 
204 static void
schedule_analytics_thread_call()205 schedule_analytics_thread_call()
206 {
207 	static const uint64_t analytics_period_ns = ANALYTICS_PERIOD_HOURS * 60 * 60 * NSEC_PER_SEC;
208 	uint64_t analytics_period_absolutetime;
209 	nanoseconds_to_absolutetime(analytics_period_ns, &analytics_period_absolutetime);
210 
211 	thread_call_enter_delayed(vm_analytics_thread_call, analytics_period_absolutetime + mach_absolute_time());
212 }
213 
214 /*
215  * This is the main entry point for reporting periodic analytics.
216  * It's called once every ANALYTICS_PERIOD_HOURS hours.
217  */
218 void
vm_analytics_tick(void * arg0,void * arg1)219 vm_analytics_tick(void *arg0, void *arg1)
220 {
221 #pragma unused(arg0, arg1)
222 	report_vm_swapusage();
223 	report_mlock_failures();
224 #if XNU_TARGET_OS_WATCH
225 	report_compressor_age();
226 #endif /* XNU_TARGET_OS_WATCH */
227 	schedule_analytics_thread_call();
228 }
229 
230 static void
vm_analytics_init()231 vm_analytics_init()
232 {
233 	vm_analytics_thread_call = thread_call_allocate_with_options(vm_analytics_tick, NULL, THREAD_CALL_PRIORITY_KERNEL, THREAD_CALL_OPTIONS_ONCE);
234 	schedule_analytics_thread_call();
235 }
236 
237 STARTUP(THREAD_CALL, STARTUP_RANK_MIDDLE, vm_analytics_init);
238