@@ -1,2 +1,2 @@
-Source: http://ports.ubuntu.com/pool/multiverse/v/virtualbox/virtualbox-guest-dkms_5.2.6-dfsg-2_all.deb
-Version: 5.2.6-dfsg-2
+Source: http://ports.ubuntu.com/pool/multiverse/v/virtualbox/virtualbox-guest-dkms_5.2.18-dfsg-2_all.deb
+Version: 5.2.18-dfsg-2
@@ -1,5 +1,5 @@
PACKAGE_NAME="virtualbox-guest"
-PACKAGE_VERSION="5.2.6"
+PACKAGE_VERSION="5.2.18"
CLEAN="rm -f *.*o"
BUILT_MODULE_NAME[0]="vboxguest"
BUILT_MODULE_LOCATION[0]="vboxguest"
@@ -1,4 +1,4 @@
-/* $Rev: 118839 $ */
+/* $Rev: 120349 $ */
/** @file
* VBoxGuest - Linux specifics.
*
@@ -39,6 +39,12 @@
# define VBOXGUEST_WITH_INPUT_DRIVER
#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+# define CONST_4_15 const
+#else
+# define CONST_4_15
+#endif
+
#include "VBoxGuestInternal.h"
#ifdef VBOXGUEST_WITH_INPUT_DRIVER
# include <linux/input.h>
@@ -1058,11 +1064,7 @@ void VGDrvNativeISRMousePollEvent(PVBOXGUESTDEVEXT pDevExt)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
/** log and dbg_log parameter setter. */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
-static int vgdrvLinuxParamLogGrpSet(const char *pszValue, const struct kernel_param *pParam)
-#else
-static int vgdrvLinuxParamLogGrpSet(const char *pszValue, struct kernel_param *pParam)
-#endif
+static int vgdrvLinuxParamLogGrpSet(const char *pszValue, CONST_4_15 struct kernel_param *pParam)
{
if (g_fLoggerCreated)
{
@@ -1077,11 +1079,7 @@ static int vgdrvLinuxParamLogGrpSet(const char *pszValue, struct kernel_param *p
}
/** log and dbg_log parameter getter. */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
-static int vgdrvLinuxParamLogGrpGet(char *pszBuf, const struct kernel_param *pParam)
-#else
-static int vgdrvLinuxParamLogGrpGet(char *pszBuf, struct kernel_param *pParam)
-#endif
+static int vgdrvLinuxParamLogGrpGet(char *pszBuf, CONST_4_15 struct kernel_param *pParam)
{
PRTLOGGER pLogger = pParam->name[0] == 'd' ? RTLogDefaultInstance() : RTLogRelGetDefaultInstance();
*pszBuf = '\0';
@@ -1092,11 +1090,7 @@ static int vgdrvLinuxParamLogGrpGet(char *pszBuf, struct kernel_param *pParam)
/** log and dbg_log_flags parameter setter. */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
-static int vgdrvLinuxParamLogFlagsSet(const char *pszValue, const struct kernel_param *pParam)
-#else
-static int vgdrvLinuxParamLogFlagsSet(const char *pszValue, struct kernel_param *pParam)
-#endif
+static int vgdrvLinuxParamLogFlagsSet(const char *pszValue, CONST_4_15 struct kernel_param *pParam)
{
if (g_fLoggerCreated)
{
@@ -1110,11 +1104,7 @@ static int vgdrvLinuxParamLogFlagsSet(const char *pszValue, struct kernel_param
}
/** log and dbg_log_flags parameter getter. */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
-static int vgdrvLinuxParamLogFlagsGet(char *pszBuf, const struct kernel_param *pParam)
-#else
-static int vgdrvLinuxParamLogFlagsGet(char *pszBuf, struct kernel_param *pParam)
-#endif
+static int vgdrvLinuxParamLogFlagsGet(char *pszBuf, CONST_4_15 struct kernel_param *pParam)
{
PRTLOGGER pLogger = pParam->name[0] == 'd' ? RTLogDefaultInstance() : RTLogRelGetDefaultInstance();
*pszBuf = '\0';
@@ -1125,11 +1115,7 @@ static int vgdrvLinuxParamLogFlagsGet(char *pszBuf, struct kernel_param *pParam)
/** log and dbg_log_dest parameter setter. */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
-static int vgdrvLinuxParamLogDstSet(const char *pszValue, const struct kernel_param *pParam)
-#else
-static int vgdrvLinuxParamLogDstSet(const char *pszValue, struct kernel_param *pParam)
-#endif
+static int vgdrvLinuxParamLogDstSet(const char *pszValue, CONST_4_15 struct kernel_param *pParam)
{
if (g_fLoggerCreated)
{
@@ -1143,11 +1129,7 @@ static int vgdrvLinuxParamLogDstSet(const char *pszValue, struct kernel_param *p
}
/** log and dbg_log_dest parameter getter. */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
-static int vgdrvLinuxParamLogDstGet(char *pszBuf, const struct kernel_param *pParam)
-#else
-static int vgdrvLinuxParamLogDstGet(char *pszBuf, struct kernel_param *pParam)
-#endif
+static int vgdrvLinuxParamLogDstGet(char *pszBuf, CONST_4_15 struct kernel_param *pParam)
{
PRTLOGGER pLogger = pParam->name[0] == 'd' ? RTLogDefaultInstance() : RTLogRelGetDefaultInstance();
*pszBuf = '\0';
@@ -1158,11 +1140,7 @@ static int vgdrvLinuxParamLogDstGet(char *pszBuf, struct kernel_param *pParam)
/** r3_log_to_host parameter setter. */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
-static int vgdrvLinuxParamR3LogToHostSet(const char *pszValue, const struct kernel_param *pParam)
-#else
-static int vgdrvLinuxParamR3LogToHostSet(const char *pszValue, struct kernel_param *pParam)
-#endif
+static int vgdrvLinuxParamR3LogToHostSet(const char *pszValue, CONST_4_15 struct kernel_param *pParam)
{
if ( pszValue == NULL
|| *pszValue == '\0'
@@ -1180,11 +1158,7 @@ static int vgdrvLinuxParamR3LogToHostSet(const char *pszValue, struct kernel_par
}
/** r3_log_to_host parameter getter. */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
-static int vgdrvLinuxParamR3LogToHostGet(char *pszBuf, const struct kernel_param *pParam)
-#else
-static int vgdrvLinuxParamR3LogToHostGet(char *pszBuf, struct kernel_param *pParam)
-#endif
+static int vgdrvLinuxParamR3LogToHostGet(char *pszBuf, CONST_4_15 struct kernel_param *pParam)
{
strcpy(pszBuf, g_DevExt.fLoggingEnabled ? "enabled" : "disabled");
return strlen(pszBuf);
@@ -56,6 +56,24 @@
/** The min nano second into the min day. (1677-09-21T00-12-43.145224192) */
#define RTTIME_MIN_DAY_NANO ( INT64_C(1000000000) * (00*3600 + 12*60 + 43) + 145224192 )
+/**
+ * Asserts that a_pTime is normalized.
+ */
+#define RTTIME_ASSERT_NORMALIZED(a_pTime) \
+ do \
+ { \
+ Assert(RT_ABS((a_pTime)->offUTC) <= 840); \
+ Assert((a_pTime)->u32Nanosecond < 1000000000); \
+ Assert((a_pTime)->u8Second < 60); \
+ Assert((a_pTime)->u8Minute < 60); \
+ Assert((a_pTime)->u8Hour < 24); \
+ Assert((a_pTime)->u8Month >= 1 && (a_pTime)->u8Month <= 12); \
+ Assert((a_pTime)->u8WeekDay < 7); \
+ Assert((a_pTime)->u16YearDay >= 1); \
+ Assert((a_pTime)->u16YearDay <= (rtTimeIsLeapYear((a_pTime)->i32Year) ? 366 : 365)); \
+ Assert((a_pTime)->u8MonthDay >= 1 && (a_pTime)->u8MonthDay <= 31); \
+ } while (0)
+
/*********************************************************************************************************************************
* Global Variables *
@@ -371,8 +389,8 @@ RT_EXPORT_SYMBOL(RTTimeExplode);
* @param pTime Pointer to the exploded time to implode.
* The fields u8Month, u8WeekDay and u8MonthDay are not used,
* and all the other fields are expected to be within their
- * bounds. Use RTTimeNormalize() to calculate u16YearDay and
- * normalize the ranges of the fields.
+ * bounds. Use RTTimeNormalize() or RTTimeLocalNormalize() to
+ * calculate u16YearDay and normalize the ranges of the fields.
*/
RTDECL(PRTTIMESPEC) RTTimeImplode(PRTTIMESPEC pTimeSpec, PCRTTIME pTime)
{
@@ -392,6 +410,7 @@ RTDECL(PRTTIMESPEC) RTTimeImplode(PRTTIMESPEC pTimeSpec, PCRTTIME pTime)
AssertReturn(pTime->u16YearDay >= 1, NULL);
AssertReturn(pTime->u16YearDay <= (rtTimeIsLeapYear(pTime->i32Year) ? 366 : 365), NULL);
AssertMsgReturn(pTime->i32Year <= RTTIME_MAX_YEAR && pTime->i32Year >= RTTIME_MIN_YEAR, ("%RI32\n", pTime->i32Year), NULL);
+ Assert(pTime->offUTC >= -840 && pTime->offUTC <= 840);
/*
* Do the conversion to nanoseconds.
@@ -409,6 +428,8 @@ RTDECL(PRTTIMESPEC) RTTimeImplode(PRTTIMESPEC pTimeSpec, PCRTTIME pTime)
AssertMsgReturn(i32Days != RTTIME_MIN_DAY || i64Nanos >= RTTIME_MIN_DAY_NANO, ("%RI64\n", i64Nanos), NULL);
i64Nanos += i32Days * UINT64_C(86400000000000);
+ if ((pTime->fFlags & RTTIME_FLAGS_TYPE_MASK) == RTTIME_FLAGS_TYPE_LOCAL)
+ i64Nanos -= pTime->offUTC * RT_NS_1MIN;
pTimeSpec->i64NanosecondsRelativeToUnixEpoch = i64Nanos;
return pTimeSpec;
@@ -418,7 +439,6 @@ RT_EXPORT_SYMBOL(RTTimeImplode);
/**
* Internal worker for RTTimeNormalize and RTTimeLocalNormalize.
- * It doesn't adjust the UCT offset but leaves that for RTTimeLocalNormalize.
*/
static PRTTIME rtTimeNormalizeInternal(PRTTIME pTime)
{
@@ -517,7 +537,7 @@ static PRTTIME rtTimeNormalizeInternal(PRTTIME pTime)
? &g_aiDayOfYearLeap[0]
: &g_aiDayOfYear[0];
pTime->u8Month = 1;
- while (pTime->u16YearDay > paiDayOfYear[pTime->u8Month])
+ while (pTime->u16YearDay >= paiDayOfYear[pTime->u8Month])
pTime->u8Month++;
Assert(pTime->u8Month >= 1 && pTime->u8Month <= 12);
pTime->u8MonthDay = pTime->u16YearDay - paiDayOfYear[pTime->u8Month - 1] + 1;
@@ -676,6 +696,45 @@ RTDECL(PRTTIME) RTTimeNormalize(PRTTIME pTime)
RT_EXPORT_SYMBOL(RTTimeNormalize);
+/**
+ * Normalizes the fields of a time structure, assuming local time.
+ *
+ * It is possible to calculate year-day from month/day and vice
+ * versa. If you adjust any of these, make sure to zero the
+ * other so you make it clear which of the fields to use. If
+ * it's ambiguous, the year-day field is used (and you get
+ * assertions in debug builds).
+ *
+ * All the time fields and the year-day or month/day fields will
+ * be adjusted for overflows. (Since all fields are unsigned, there
+ * is no underflows.) It is possible to exploit this for simple
+ * date math, though the recommended way of doing that to implode
+ * the time into a timespec and do the math on that.
+ *
+ * @returns pTime on success.
+ * @returns NULL if the data is invalid.
+ *
+ * @param pTime The time structure to normalize.
+ *
+ * @remarks This function doesn't work with UTC time, only with local time.
+ */
+RTDECL(PRTTIME) RTTimeLocalNormalize(PRTTIME pTime)
+{
+ /*
+ * Validate that we've got the minimum of stuff handy.
+ */
+ AssertReturn(VALID_PTR(pTime), NULL);
+ AssertMsgReturn(!(pTime->fFlags & ~RTTIME_FLAGS_MASK), ("%#x\n", pTime->fFlags), NULL);
+ AssertMsgReturn((pTime->fFlags & RTTIME_FLAGS_TYPE_MASK) != RTTIME_FLAGS_TYPE_UTC, ("Use RTTimeNormalize!\n"), NULL);
+
+ pTime = rtTimeNormalizeInternal(pTime);
+ if (pTime)
+ pTime->fFlags |= RTTIME_FLAGS_TYPE_LOCAL;
+ return pTime;
+}
+RT_EXPORT_SYMBOL(RTTimeLocalNormalize);
+
+
/**
* Converts a time spec to a ISO date string.
*
@@ -693,25 +752,25 @@ RTDECL(char *) RTTimeToString(PCRTTIME pTime, char *psz, size_t cb)
if ( (pTime->fFlags & RTTIME_FLAGS_TYPE_MASK) == RTTIME_FLAGS_TYPE_LOCAL
&& pTime->offUTC)
{
- int32_t offUTCHour = pTime->offUTC / 60;
- int32_t offUTCMinute = pTime->offUTC % 60;
- char chSign;
- Assert(pTime->offUTC <= 840 && pTime->offUTC >= -840);
- if (pTime->offUTC >= 0)
+ int32_t offUTC = pTime->offUTC;
+ Assert(offUTC <= 840 && offUTC >= -840);
+ char chSign;
+ if (offUTC >= 0)
chSign = '+';
else
{
chSign = '-';
- offUTCMinute = -offUTCMinute;
- offUTCHour = -offUTCHour;
+ offUTC = -offUTC;
}
+ uint32_t offUTCHour = (uint32_t)offUTC / 60;
+ uint32_t offUTCMinute = (uint32_t)offUTC % 60;
cch = RTStrPrintf(psz, cb,
- "%RI32-%02u-%02uT%02u:%02u:%02u.%09RU32%c%02d%02d",
+                          "%RI32-%02u-%02uT%02u:%02u:%02u.%09RU32%c%02d:%02d",
pTime->i32Year, pTime->u8Month, pTime->u8MonthDay,
pTime->u8Hour, pTime->u8Minute, pTime->u8Second, pTime->u32Nanosecond,
chSign, offUTCHour, offUTCMinute);
if ( cch <= 15
- || psz[cch - 5] != chSign)
+ || psz[cch - 6] != chSign)
return NULL;
}
else
@@ -835,7 +894,7 @@ RTDECL(PRTTIME) RTTimeFromString(PRTTIME pTime, const char *pszString)
return NULL;
/* Second. */
- rc = RTStrToUInt8Ex(pszString, (char **)&pszString, 10, &pTime->u8Minute);
+ rc = RTStrToUInt8Ex(pszString, (char **)&pszString, 10, &pTime->u8Second);
if (rc != VINF_SUCCESS && rc != VWRN_TRAILING_CHARS && rc != VWRN_TRAILING_SPACES)
return NULL;
if (pTime->u8Second > 59)
@@ -866,9 +925,25 @@ RTDECL(PRTTIME) RTTimeFromString(PRTTIME pTime, const char *pszString)
else if ( *pszString == '+'
|| *pszString == '-')
{
- rc = RTStrToInt32Ex(pszString, (char **)&pszString, 10, &pTime->offUTC);
+ int8_t cUtcHours = 0;
+ rc = RTStrToInt8Ex(pszString, (char **)&pszString, 10, &cUtcHours);
if (rc != VINF_SUCCESS && rc != VWRN_TRAILING_CHARS && rc != VWRN_TRAILING_SPACES)
return NULL;
+ uint8_t cUtcMin = 0;
+ if (*pszString == ':')
+ {
+ rc = RTStrToUInt8Ex(pszString + 1, (char **)&pszString, 10, &cUtcMin);
+ if (rc != VINF_SUCCESS && rc != VWRN_TRAILING_SPACES)
+ return NULL;
+ }
+ else if (*pszString && !RT_C_IS_BLANK(*pszString))
+ return NULL;
+ if (cUtcHours >= 0)
+ pTime->offUTC = cUtcHours * 60 + cUtcMin;
+ else
+ pTime->offUTC = cUtcHours * 60 - cUtcMin;
+ if (RT_ABS(pTime->offUTC) > 840)
+ return NULL;
}
/* else: No time zone given, local with offUTC = 0. */
@@ -905,3 +980,208 @@ RTDECL(PRTTIMESPEC) RTTimeSpecFromString(PRTTIMESPEC pTime, const char *pszStrin
}
RT_EXPORT_SYMBOL(RTTimeSpecFromString);
+
+/**
+ * Adds one day to @a pTime.
+ *
+ * ASSUMES it is zulu time so DST can be ignored.
+ */
+static PRTTIME rtTimeAdd1Day(PRTTIME pTime)
+{
+ Assert(!pTime->offUTC);
+ rtTimeNormalizeInternal(pTime);
+ pTime->u8MonthDay += 1;
+ pTime->u16YearDay = 0;
+ return rtTimeNormalizeInternal(pTime);
+}
+
+
+/**
+ * Subtracts one day from @a pTime.
+ *
+ * ASSUMES it is zulu time so DST can be ignored.
+ */
+static PRTTIME rtTimeSub1Day(PRTTIME pTime)
+{
+ Assert(!pTime->offUTC);
+ rtTimeNormalizeInternal(pTime);
+ if (pTime->u16YearDay > 1)
+ {
+ pTime->u16YearDay -= 1;
+ pTime->u8Month = 0;
+ pTime->u8MonthDay = 0;
+ }
+ else
+ {
+ pTime->i32Year -= 1;
+ pTime->u16YearDay = rtTimeIsLeapYear(pTime->i32Year) ? 366 : 365;
+ pTime->u8MonthDay = 31;
+ pTime->u8Month = 12;
+ pTime->fFlags &= ~(RTTIME_FLAGS_COMMON_YEAR | RTTIME_FLAGS_LEAP_YEAR);
+ }
+ return rtTimeNormalizeInternal(pTime);
+}
+
+
+/**
+ * Adds a signed number of minutes to @a pTime.
+ *
+ * ASSUMES it is zulu time so DST can be ignored.
+ *
+ * @param pTime The time structure to work on.
+ * @param cAddend Number of minutes to add.
+ * ASSUMES the value isn't all that high!
+ */
+static PRTTIME rtTimeAddMinutes(PRTTIME pTime, int32_t cAddend)
+{
+ Assert(RT_ABS(cAddend) < 31 * 24 * 60);
+
+ /*
+ * Work on minutes of the day.
+ */
+ int32_t const cMinutesInDay = 24 * 60;
+ int32_t iDayMinute = (unsigned)pTime->u8Hour * 60 + pTime->u8Minute;
+ iDayMinute += cAddend;
+
+ while (iDayMinute >= cMinutesInDay)
+ {
+ rtTimeAdd1Day(pTime);
+ iDayMinute -= cMinutesInDay;
+ }
+
+ while (iDayMinute < 0)
+ {
+ rtTimeSub1Day(pTime);
+ iDayMinute += cMinutesInDay;
+ }
+
+ pTime->u8Hour = iDayMinute / 60;
+ pTime->u8Minute = iDayMinute % 60;
+
+ return pTime;
+}
+
+
+/**
+ * Converts @a pTime to zulu time (UTC) if needed.
+ *
+ * @returns pTime.
+ * @param pTime What to convert (in/out).
+ */
+static PRTTIME rtTimeConvertToZulu(PRTTIME pTime)
+{
+ RTTIME_ASSERT_NORMALIZED(pTime);
+ if ((pTime->fFlags & RTTIME_FLAGS_TYPE_MASK) != RTTIME_FLAGS_TYPE_UTC)
+ {
+ int32_t offUTC = pTime->offUTC;
+ pTime->offUTC = 0;
+ pTime->fFlags &= ~RTTIME_FLAGS_TYPE_MASK;
+ pTime->fFlags |= RTTIME_FLAGS_TYPE_UTC;
+ if (offUTC != 0)
+ rtTimeAddMinutes(pTime, -offUTC);
+ }
+ return pTime;
+}
+
+
+/**
+ * Converts a time structure to UTC, relying on UTC offset information if it contains local time.
+ *
+ * @returns pTime on success.
+ * @returns NULL if the data is invalid.
+ * @param pTime The time structure to convert.
+ */
+RTDECL(PRTTIME) RTTimeConvertToZulu(PRTTIME pTime)
+{
+ /*
+ * Validate that we've got the minimum of stuff handy.
+ */
+ AssertReturn(VALID_PTR(pTime), NULL);
+ AssertMsgReturn(!(pTime->fFlags & ~RTTIME_FLAGS_MASK), ("%#x\n", pTime->fFlags), NULL);
+
+ return rtTimeConvertToZulu(rtTimeNormalizeInternal(pTime));
+}
+RT_EXPORT_SYMBOL(RTTimeConvertToZulu);
+
+
+/**
+ * Compares two normalized time structures.
+ *
+ * @retval 0 if equal.
+ * @retval -1 if @a pLeft is earlier than @a pRight.
+ * @retval 1 if @a pRight is earlier than @a pLeft.
+ *
+ * @param pLeft The left side time. NULL is accepted.
+ * @param pRight The right side time. NULL is accepted.
+ *
+ * @note A NULL time is considered smaller than anything else. If both are
+ * NULL, they are considered equal.
+ */
+RTDECL(int) RTTimeCompare(PCRTTIME pLeft, PCRTTIME pRight)
+{
+#ifdef RT_STRICT
+ if (pLeft)
+ RTTIME_ASSERT_NORMALIZED(pLeft);
+ if (pRight)
+ RTTIME_ASSERT_NORMALIZED(pRight);
+#endif
+
+ int iRet;
+ if (pLeft)
+ {
+ if (pRight)
+ {
+ /*
+ * Only work with normalized zulu time.
+ */
+ RTTIME TmpLeft;
+ if ( pLeft->offUTC != 0
+ || pLeft->u16YearDay == 0
+ || pLeft->u16YearDay > 366
+ || pLeft->u8Hour >= 60
+ || pLeft->u8Minute >= 60
+ || pLeft->u8Second >= 60)
+ {
+ TmpLeft = *pLeft;
+ pLeft = rtTimeConvertToZulu(rtTimeNormalizeInternal(&TmpLeft));
+ }
+
+ RTTIME TmpRight;
+ if ( pRight->offUTC != 0
+ || pRight->u16YearDay == 0
+ || pRight->u16YearDay > 366
+ || pRight->u8Hour >= 60
+ || pRight->u8Minute >= 60
+ || pRight->u8Second >= 60)
+ {
+ TmpRight = *pRight;
+ pRight = rtTimeConvertToZulu(rtTimeNormalizeInternal(&TmpRight));
+ }
+
+ /*
+ * Do the comparison.
+ */
+ if ( pLeft->i32Year != pRight->i32Year)
+ iRet = pLeft->i32Year < pRight->i32Year ? -1 : 1;
+ else if ( pLeft->u16YearDay != pRight->u16YearDay)
+ iRet = pLeft->u16YearDay < pRight->u16YearDay ? -1 : 1;
+ else if ( pLeft->u8Hour != pRight->u8Hour)
+ iRet = pLeft->u8Hour < pRight->u8Hour ? -1 : 1;
+ else if ( pLeft->u8Minute != pRight->u8Minute)
+ iRet = pLeft->u8Minute < pRight->u8Minute ? -1 : 1;
+ else if ( pLeft->u8Second != pRight->u8Second)
+ iRet = pLeft->u8Second < pRight->u8Second ? -1 : 1;
+ else if ( pLeft->u32Nanosecond != pRight->u32Nanosecond)
+ iRet = pLeft->u32Nanosecond < pRight->u32Nanosecond ? -1 : 1;
+ else
+ iRet = 0;
+ }
+ else
+ iRet = 1;
+ }
+ else
+ iRet = pRight ? -1 : 0;
+ return iRet;
+}
+RT_EXPORT_SYMBOL(RTTimeCompare);
+
@@ -200,6 +200,9 @@ typedef VBGLIOCHGCMCALL const RT_FAR *PCVBGLIOCHGCMCALL;
*/
# define VBGL_HGCM_HDR_INIT_TIMED(a_pHdr, a_idClient, a_idFunction, a_cParameters, a_cMsTimeout) \
do { \
+ VBGLREQHDR_INIT_EX(&(a_pHdr)->Hdr, \
+ sizeof(VBGLIOCHGCMCALL) + (a_cParameters) * sizeof(HGCMFunctionParameter), \
+ sizeof(VBGLIOCHGCMCALL) + (a_cParameters) * sizeof(HGCMFunctionParameter)); \
(a_pHdr)->u32ClientID = (a_idClient); \
(a_pHdr)->u32Function = (a_idFunction); \
(a_pHdr)->cMsTimeout = (a_cMsTimeout); \
@@ -29,7 +29,7 @@
#include <iprt/cdefs.h>
-/** @defgroup VBox Common Defintions and Macros
+/** @defgroup grp_vbox_cdefs VBox Common Defintions and Macros
* @{
*/
@@ -52,6 +52,23 @@
# endif
#endif
+/** @def VBOX_STRICT_GUEST
+ * Be strict on guest input. This can be overriden on the compiler command line
+ * or per source file by defining VBOX_NO_STRICT_GUEST.
+ *
+ * @sa VBox/assert.h and its ASSERT_GUEST_XXXX macros.
+ */
+#ifndef VBOX_STRICT_GUEST
+# ifdef VBOX_STRICT
+# define VBOX_STRICT_GUEST
+# endif
+#endif
+/** @def VBOX_NO_STRICT_GUEST
+ * Define to override VBOX_STRICT_GUEST, disabling asserting on guest input. */
+#ifdef VBOX_NO_STRICT_GUEST
+# undef VBOX_STRICT_GUEST
+#endif
+
/*
* Shut up DOXYGEN warnings and guide it properly thru the code.
@@ -59,6 +76,8 @@
#ifdef DOXYGEN_RUNNING
#define VBOX_WITH_STATISTICS
#define VBOX_STRICT
+#define VBOX_STRICT_GUEST
+#define VBOX_NO_STRICT_GUEST
#define IN_DBG
#define IN_DIS
#define IN_INTNET_R0
@@ -1863,6 +1863,9 @@
#define VERR_SUPDRV_NOT_BUDDING_VM_PROCESS_1 (-3748)
/** The process trying to open VBoxDrv is not a budding VM process (2). */
#define VERR_SUPDRV_NOT_BUDDING_VM_PROCESS_2 (-3749)
+
+/** Raw-mode is unavailable courtesy of Hyper-V. */
+#define VERR_SUPDRV_NO_RAW_MODE_HYPER_V_ROOT (-7000)
/** @} */
@@ -101,6 +101,8 @@ typedef enum LOGGROUP
LOG_GROUP_DEV_EHCI,
/** Floppy Controller Device group. */
LOG_GROUP_DEV_FDC,
+ /** Flash Device group. */
+ LOG_GROUP_DEV_FLASH,
/** Guest Interface Manager Device group. */
LOG_GROUP_DEV_GIM,
/** HDA Device group. */
@@ -849,6 +851,7 @@ typedef enum LOGGROUP
"DEV_EFI", \
"DEV_EHCI", \
"DEV_FDC", \
+ "DEV_FLASH", \
"DEV_GIM", \
"DEV_HDA", \
"DEV_HDA_CODEC", \
@@ -4,7 +4,7 @@
*/
/*
- * Copyright (C) 2007-2017 Oracle Corporation
+ * Copyright (C) 2007-2018 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
@@ -212,6 +212,14 @@
#define RTTRACEBUF_MAGIC UINT32_C(0x19030625)
/** Magic value of RTTRACEBUFINT::u32Magic after the final release. */
#define RTTRACEBUF_MAGIC_DEAD UINT32_C(0x19500121)
+/** The value of RTTRACELOGRDRINT::u32Magic. (John Michael Scalzi) */
+#define RTTRACELOGRDR_MAGIC UINT32_C(0x19690510)
+/** The value of RTTRACELOGRDRINT::u32Magic after RTTraceLogRdrDestroy(). */
+#define RTTRACELOGRDR_MAGIC_DEAD (~RTTRACELOGRDR_MAGIC)
+/** The value of RTTRACELOGWRINT::u32Magic. (Herbert George Wells) */
+#define RTTRACELOGWR_MAGIC UINT32_C(0x18660921)
+/** The value of RTTRACELOGWRINT::u32Magic after RTTraceLogWrDestroy(). */
+#define RTTRACELOGWR_MAGIC_DEAD UINT32_C(0x19460813)
/** The value of RTVFSOBJINTERNAL::u32Magic. (Yasunari Kawabata) */
#define RTVFSOBJ_MAGIC UINT32_C(0x18990614)
/** The value of RTVFSOBJINTERNAL::u32Magic after close. */
@@ -1509,8 +1509,12 @@ DECLINLINE(void) ASMSerializeInstructionRdTscp(void)
*/
#if (defined(RT_ARCH_X86) && ARCH_BITS == 16) || defined(IN_GUEST)
# define ASMSerializeInstruction() ASMSerializeInstructionIRet()
-#else
+#elif defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
# define ASMSerializeInstruction() ASMSerializeInstructionCpuId()
+#elif defined(RT_ARCH_SPARC64)
+RTDECL(void) ASMSerializeInstruction(void);
+#else
+# error "Port me"
#endif
@@ -1519,8 +1523,20 @@ DECLINLINE(void) ASMSerializeInstructionRdTscp(void)
*/
DECLINLINE(void) ASMMemoryFence(void)
{
- /** @todo use mfence? check if all cpus we care for support it. */
-#if ARCH_BITS == 16
+#if defined(RT_ARCH_AMD64) || (defined(RT_ARCH_X86) && !defined(RT_WITH_OLD_CPU_SUPPORT))
+# if RT_INLINE_ASM_GNU_STYLE
+ __asm__ __volatile__ (".byte 0x0f,0xae,0xf0\n\t");
+# elif RT_INLINE_ASM_USES_INTRIN
+ _mm_mfence();
+# else
+ __asm
+ {
+ _emit 0x0f
+ _emit 0xae
+ _emit 0xf0
+ }
+# endif
+#elif ARCH_BITS == 16
uint16_t volatile u16;
ASMAtomicXchgU16(&u16, 0);
#else
@@ -1535,8 +1551,22 @@ DECLINLINE(void) ASMMemoryFence(void)
*/
DECLINLINE(void) ASMWriteFence(void)
{
- /** @todo use sfence? check if all cpus we care for support it. */
+#if defined(RT_ARCH_AMD64) || (defined(RT_ARCH_X86) && !defined(RT_WITH_OLD_CPU_SUPPORT))
+# if RT_INLINE_ASM_GNU_STYLE
+ __asm__ __volatile__ (".byte 0x0f,0xae,0xf8\n\t");
+# elif RT_INLINE_ASM_USES_INTRIN
+ _mm_sfence();
+# else
+ __asm
+ {
+ _emit 0x0f
+ _emit 0xae
+ _emit 0xf8
+ }
+# endif
+#else
ASMMemoryFence();
+#endif
}
@@ -1545,8 +1575,22 @@ DECLINLINE(void) ASMWriteFence(void)
*/
DECLINLINE(void) ASMReadFence(void)
{
- /** @todo use lfence? check if all cpus we care for support it. */
+#if defined(RT_ARCH_AMD64) || (defined(RT_ARCH_X86) && !defined(RT_WITH_OLD_CPU_SUPPORT))
+# if RT_INLINE_ASM_GNU_STYLE
+ __asm__ __volatile__ (".byte 0x0f,0xae,0xe8\n\t");
+# elif RT_INLINE_ASM_USES_INTRIN
+ _mm_lfence();
+# else
+ __asm
+ {
+ _emit 0x0f
+ _emit 0xae
+ _emit 0xe8
+ }
+# endif
+#else
ASMMemoryFence();
+#endif
}
@@ -3897,7 +3941,9 @@ DECLINLINE(void) ASMMemFill32(volatile void RT_FAR *pv, size_t cb, uint32_t u32)
*
* @todo Fix name, it is a predicate function but it's not returning boolean!
*/
-#if !defined(RDESKTOP) && (!defined(RT_OS_LINUX) || !defined(__KERNEL__))
+#if !defined(RDESKTOP) && (!defined(RT_OS_LINUX) || !defined(__KERNEL__)) \
+ && !defined(RT_ARCH_SPARC64) \
+ && !defined(RT_ARCH_SPARC)
DECLASM(void RT_FAR *) ASMMemFirstNonZero(void const RT_FAR *pv, size_t cb);
#else
DECLINLINE(void RT_FAR *) ASMMemFirstNonZero(void const RT_FAR *pv, size_t cb)
@@ -4004,7 +4050,9 @@ DECLINLINE(bool) ASMMemIsZeroPage(void const RT_FAR *pvPage)
* @remarks No alignment requirements.
*/
#if (!defined(RT_OS_LINUX) || !defined(__KERNEL__)) \
- && (!defined(RT_OS_FREEBSD) || !defined(_KERNEL))
+ && (!defined(RT_OS_FREEBSD) || !defined(_KERNEL)) \
+ && !defined(RT_ARCH_SPARC64) \
+ && !defined(RT_ARCH_SPARC)
DECLASM(void *) ASMMemFirstMismatchingU8(void const RT_FAR *pv, size_t cb, uint8_t u8);
#else
DECLINLINE(void *) ASMMemFirstMismatchingU8(void const RT_FAR *pv, size_t cb, uint8_t u8)
@@ -1572,6 +1572,67 @@
#endif
+/** @name Untrusted data classifications.
+ * @{ */
+/** @def RT_UNTRUSTED_USER
+ * For marking non-volatile (race free) data from user mode as untrusted.
+ * This is just for visible documentation. */
+#define RT_UNTRUSTED_USER
+/** @def RT_UNTRUSTED_VOLATILE_USER
+ * For marking volatile data shared with user mode as untrusted.
+ * This is more than just documentation as it specifies the 'volatile' keyword,
+ * because the guest could modify the data at any time. */
+#define RT_UNTRUSTED_VOLATILE_USER volatile
+
+/** @def RT_UNTRUSTED_GUEST
+ * For marking non-volatile (race free) data from the guest as untrusted.
+ * This is just for visible documentation. */
+#define RT_UNTRUSTED_GUEST
+/** @def RT_UNTRUSTED_VOLATILE_GUEST
+ * For marking volatile data shared with the guest as untrusted.
+ * This is more than just documentation as it specifies the 'volatile' keyword,
+ * because the guest could modify the data at any time. */
+#define RT_UNTRUSTED_VOLATILE_GUEST volatile
+
+/** @def RT_UNTRUSTED_HOST
+ * For marking non-volatile (race free) data from the host as untrusted.
+ * This is just for visible documentation. */
+#define RT_UNTRUSTED_HOST
+/** @def RT_UNTRUSTED_VOLATILE_HOST
+ * For marking volatile data shared with the host as untrusted.
+ * This is more than just documentation as it specifies the 'volatile' keyword,
+ * because the host could modify the data at any time. */
+#define RT_UNTRUSTED_VOLATILE_HOST volatile
+
+/** @def RT_UNTRUSTED_HSTGST
+ * For marking non-volatile (race free) data from the host/gust as untrusted.
+ * This is just for visible documentation. */
+#define RT_UNTRUSTED_HSTGST
+/** @def RT_UNTRUSTED_VOLATILE_HSTGST
+ * For marking volatile data shared with the host/guest as untrusted.
+ * This is more than just documentation as it specifies the 'volatile' keyword,
+ * because the host could modify the data at any time. */
+#define RT_UNTRUSTED_VOLATILE_HSTGST volatile
+/** @} */
+
+/** @name Fences for use when handling untrusted data.
+ * @{ */
+/** For use after copying untruated volatile data to a non-volatile location.
+ * This translates to a compiler memory barrier and will help ensure that the
+ * compiler uses the non-volatile copy of the data. */
+#define RT_UNTRUSTED_NONVOLATILE_COPY_FENCE() ASMCompilerBarrier()
+/** For use after finished validating guest input.
+ * What this translates to is architecture dependent. On intel it will
+ * translate to a CPU load+store fence as well as a compiler memory barrier. */
+#if defined(RT_ARCH_AMD64) || (defined(RT_ARCH_X86) && !defined(RT_WITH_OLD_CPU_SUPPORT))
+# define RT_UNTRUSTED_VALIDATED_FENCE() do { ASMCompilerBarrier(); ASMReadFence(); } while (0)
+#elif defined(RT_ARCH_X86)
+# define RT_UNTRUSTED_VALIDATED_FENCE() do { ASMCompilerBarrier(); ASMMemoryFence(); } while (0)
+#else
+# define RT_UNTRUSTED_VALIDATED_FENCE() do { ASMCompilerBarrier(); } while (0)
+#endif
+/** @} */
+
/** @def RT_LIKELY
* Give the compiler a hint that an expression is very likely to hold true.
@@ -2352,6 +2352,8 @@ RT_C_DECLS_END
#define VERR_ASN1_TOO_DEEPLY_NESTED (-22855)
/** Generic unexpected object ID error. */
#define VERR_ASN1_UNEXPECTED_OBJ_ID (-22856)
+/** Invalid ASN.1 INTEGER encoding. */
+#define VERR_ASN1_INVALID_INTEGER_ENCODING (-22857)
/** ANS.1 internal error 1. */
#define VERR_ASN1_INTERNAL_ERROR_1 (-22895)
@@ -2223,6 +2223,8 @@
# define RTThreadWait RT_MANGLER(RTThreadWait)
# define RTThreadWaitNoResume RT_MANGLER(RTThreadWaitNoResume)
# define RTThreadYield RT_MANGLER(RTThreadYield)
+# define RTTimeCompare RT_MANGLER(RTTimeCompare)
+# define RTTimeConvertToZulu RT_MANGLER(RTTimeConvertToZulu)
# define RTTimeDbgBad RT_MANGLER(RTTimeDbgBad)
# define RTTimeDbgExpired RT_MANGLER(RTTimeDbgExpired)
# define RTTimeDbgRaces RT_MANGLER(RTTimeDbgRaces)
@@ -2232,6 +2234,7 @@
# define RTTimeIsLeapYear RT_MANGLER(RTTimeIsLeapYear)
# define RTTimeLocalDeltaNano RT_MANGLER(RTTimeLocalDeltaNano)
# define RTTimeLocalExplode RT_MANGLER(RTTimeLocalExplode)
+# define RTTimeLocalNormalize RT_MANGLER(RTTimeLocalNormalize)
# define RTTimeLocalNow RT_MANGLER(RTTimeLocalNow)
# define RTTimeMilliTS RT_MANGLER(RTTimeMilliTS)
# define RTTimeNanoTS RT_MANGLER(RTTimeNanoTS)
@@ -2336,6 +2339,31 @@
# define RTTraceBufRelease RT_MANGLER(RTTraceBufRelease)
# define RTTraceBufRetain RT_MANGLER(RTTraceBufRetain)
# define RTTraceGetDefaultBuf RT_MANGLER(RTTraceGetDefaultBuf)
+# define RTTraceLogRdrCreate RT_MANGLER(RTTraceLogRdrCreate)
+# define RTTraceLogRdrCreateFromFile RT_MANGLER(RTTraceLogRdrCreateFromFile)
+# define RTTraceLogRdrDestroy RT_MANGLER(RTTraceLogRdrDestroy)
+# define RTTraceLogRdrEvtFillVals RT_MANGLER(RTTraceLogRdrEvtFillVals)
+# define RTTraceLogRdrEvtGetDesc RT_MANGLER(RTTraceLogRdrEvtGetDesc)
+# define RTTraceLogRdrEvtGetSeqNo RT_MANGLER(RTTraceLogRdrEvtGetSeqNo)
+# define RTTraceLogRdrEvtGetTs RT_MANGLER(RTTraceLogRdrEvtGetTs)
+# define RTTraceLogRdrEvtIsGrouped RT_MANGLER(RTTraceLogRdrEvtIsGrouped)
+# define RTTraceLogRdrEvtPoll RT_MANGLER(RTTraceLogRdrEvtPoll)
+# define RTTraceLogRdrEvtQueryVal RT_MANGLER(RTTraceLogRdrEvtQueryVal)
+# define RTTraceLogRdrIteratorFree RT_MANGLER(RTTraceLogRdrIteratorFree)
+# define RTTraceLogRdrIteratorNext RT_MANGLER(RTTraceLogRdrIteratorNext)
+# define RTTraceLogRdrIteratorQueryEvent RT_MANGLER(RTTraceLogRdrIteratorQueryEvent)
+# define RTTraceLogRdrQueryIterator RT_MANGLER(RTTraceLogRdrQueryIterator)
+# define RTTraceLogRdrQueryLastEvt RT_MANGLER(RTTraceLogRdrQueryLastEvt)
+# define RTTraceLogWrAddEvtDesc RT_MANGLER(RTTraceLogWrAddEvtDesc)
+# define RTTraceLogWrCreate RT_MANGLER(RTTraceLogWrCreate)
+# define RTTraceLogWrCreateFile RT_MANGLER(RTTraceLogWrCreateFile)
+# define RTTraceLogWrCreateTcpClient RT_MANGLER(RTTraceLogWrCreateTcpClient)
+# define RTTraceLogWrCreateTcpServer RT_MANGLER(RTTraceLogWrCreateTcpServer)
+# define RTTraceLogWrDestroy RT_MANGLER(RTTraceLogWrDestroy)
+# define RTTraceLogWrEvtAdd RT_MANGLER(RTTraceLogWrEvtAdd)
+# define RTTraceLogWrEvtAddL RT_MANGLER(RTTraceLogWrEvtAddL)
+# define RTTraceLogWrEvtAddLV RT_MANGLER(RTTraceLogWrEvtAddLV)
+# define RTTraceLogWrEvtAddSg RT_MANGLER(RTTraceLogWrEvtAddSg)
# define RTTraceSetDefaultBuf RT_MANGLER(RTTraceSetDefaultBuf)
# define RTUdpCreateClientSocket RT_MANGLER(RTUdpCreateClientSocket)
# define RTUdpRead RT_MANGLER(RTUdpRead)
@@ -142,6 +142,31 @@ RT_C_DECLS_END
#define RT_BZERO(pv, cb) do { memset((pv), 0, cb); } while (0)
+/**
+ * For copying a volatile variable to a non-volatile one.
+ * @param a_Dst The non-volatile destination variable.
+ * @param a_VolatileSrc The volatile source variable / dereferenced pointer.
+ */
+#define RT_COPY_VOLATILE(a_Dst, a_VolatileSrc) \
+ do { \
+ void const volatile *a_pvVolatileSrc_BCopy_Volatile = &(a_VolatileSrc); \
+ AssertCompile(sizeof(a_Dst) == sizeof(a_VolatileSrc)); \
+ memcpy(&(a_Dst), (void const *)a_pvVolatileSrc_BCopy_Volatile, sizeof(a_Dst)); \
+ } while (0)
+
+/**
+ * For copy a number of bytes from a volatile buffer to a non-volatile one.
+ *
+ * @param a_pDst Pointer to the destination buffer.
+ * @param a_pVolatileSrc Pointer to the volatile source buffer.
+ * @param a_cbToCopy Number of bytes to copy.
+ */
+#define RT_BCOPY_VOLATILE(a_pDst, a_pVolatileSrc, a_cbToCopy) \
+ do { \
+ void const volatile *a_pvVolatileSrc_BCopy_Volatile = (a_pVolatileSrc); \
+ memcpy((a_pDst), (void const *)a_pvVolatileSrc_BCopy_Volatile, (a_cbToCopy)); \
+ } while (0)
+
/** @defgroup grp_rt_str RTStr - String Manipulation
* Mostly UTF-8 related helpers where the standard string functions won't do.
@@ -609,8 +609,7 @@ typedef struct RTTIME
uint32_t u32Nanosecond;
/** Flags, of the RTTIME_FLAGS_* \#defines. */
uint32_t fFlags;
- /** UCT time offset in minutes (-840-840).
- * @remarks The implementation of RTTimeLocal* isn't quite there yet, so this might not be 100% correct. */
+ /** UCT time offset in minutes (-840-840). */
int32_t offUTC;
} RTTIME;
#pragma pack()
@@ -752,6 +751,16 @@ RTDECL(PRTTIME) RTTimeLocalExplode(PRTTIME pTime, PCRTTIMESPEC pTimeSpec);
*/
RTDECL(PRTTIME) RTTimeLocalNormalize(PRTTIME pTime);
+/**
+ * Converts a time structure to UTC, relying on UTC offset information
+ * if it contains local time.
+ *
+ * @returns pTime on success.
+ * @returns NULL if the data is invalid.
+ * @param pTime The time structure to convert.
+ */
+RTDECL(PRTTIME) RTTimeConvertToZulu(PRTTIME pTime);
+
/**
* Converts a time spec to a ISO date string.
*
@@ -785,6 +794,21 @@ RTDECL(PRTTIME) RTTimeFromString(PRTTIME pTime, const char *pszString);
*/
RTDECL(bool) RTTimeIsLeapYear(int32_t i32Year);
+/**
+ * Compares two normalized time structures.
+ *
+ * @retval 0 if equal.
+ * @retval -1 if @a pLeft is earlier than @a pRight.
+ * @retval 1 if @a pRight is earlier than @a pLeft.
+ *
+ * @param pLeft The left side time. NULL is accepted.
+ * @param pRight The right side time. NULL is accepted.
+ *
+ * @note A NULL time is considered smaller than anything else. If both are
+ * NULL, they are considered equal.
+ */
+RTDECL(int) RTTimeCompare(PCRTTIME pLeft, PCRTTIME pRight);
+
/**
* Gets the current nanosecond timestamp.
*
@@ -615,8 +615,9 @@ typedef const X86CPUIDFEATEDX *PCX86CPUIDFEATEDX;
#define X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB RT_BIT_32(26)
/** EDX Bit 27 - IBRS & IBPB - Supports the STIBP flag in IA32_SPEC_CTRL. */
#define X86_CPUID_STEXT_FEATURE_EDX_STIBP RT_BIT_32(27)
-
-/** EDX Bit 29 - ARCHCAP - Supports the IA32_ARCH_CAP MSR. */
+/** EDX Bit 28 - FLUSH_CMD - Supports IA32_FLUSH_CMD MSR. */
+#define X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD RT_BIT_32(28)
+/** EDX Bit 29 - ARCHCAP - Supports the IA32_ARCH_CAPABILITIES MSR. */
#define X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP RT_BIT_32(29)
/** @} */
@@ -897,6 +898,8 @@ typedef const X86CPUIDFEATEDX *PCX86CPUIDFEATEDX;
#define X86_CR4_VMXE RT_BIT_32(13)
/** Bit 14 - SMXE - Safer Mode Extensions Enabled. */
#define X86_CR4_SMXE RT_BIT_32(14)
+/** Bit 16 - FSGSBASE - Read/write FSGSBASE instructions Enable. */
+#define X86_CR4_FSGSBASE RT_BIT_32(16)
/** Bit 17 - PCIDE - Process-Context Identifiers Enabled. */
#define X86_CR4_PCIDE RT_BIT_32(17)
/** Bit 18 - OSXSAVE - Operating System Support for XSAVE and processor
@@ -1200,13 +1203,22 @@ AssertCompile(X86_DR7_ANY_RW_IO(UINT32_C(0x00040000)) == 0);
/** MTRR Capabilities. */
#define MSR_IA32_MTRR_CAP 0xFE
-/** Architecture capabilities (bugfixes).
- * @note May move */
-#define MSR_IA32_ARCH_CAP UINT32_C(0x10a)
-/** CPU is no subject to spectre problems. */
-#define MSR_IA32_ARCH_CAP_F_SPECTRE_FIX RT_BIT_32(0)
+/** Architecture capabilities (bugfixes). */
+#define MSR_IA32_ARCH_CAPABILITIES UINT32_C(0x10a)
+/** CPU is no subject to meltdown problems. */
+#define MSR_IA32_ARCH_CAP_F_RDCL_NO RT_BIT_32(0)
/** CPU has better IBRS and you can leave it on all the time. */
-#define MSR_IA32_ARCH_CAP_F_BETTER_IBRS RT_BIT_32(1)
+#define MSR_IA32_ARCH_CAP_F_IBRS_ALL RT_BIT_32(1)
+/** CPU has return stack buffer (RSB) override. */
+#define MSR_IA32_ARCH_CAP_F_RSBO RT_BIT_32(2)
+/** Virtual machine monitors need not flush the level 1 data cache on VM entry.
+ * This is also the case when MSR_IA32_ARCH_CAP_F_RDCL_NO is set. */
+#define MSR_IA32_ARCH_CAP_F_VMM_NEED_NOT_FLUSH_L1D RT_BIT_32(3)
+
+/** Flush command register. */
+#define MSR_IA32_FLUSH_CMD UINT32_C(0x10b)
+/** Flush the level 1 data cache when this bit is written. */
+#define MSR_IA32_FLUSH_CMD_F_L1D RT_BIT_32(0)
/** Cache control/info. */
#define MSR_BBL_CR_CTL3 UINT32_C(0x11e)
@@ -2584,6 +2596,20 @@ typedef const X86PML4 *PCX86PML4;
/** @} */
+/**
+ * Intel PCID invalidation types.
+ */
+/** Individual address invalidation. */
+#define X86_INVPCID_TYPE_INDV_ADDR 0
+/** Single-context invalidation. */
+#define X86_INVPCID_TYPE_SINGLE_CONTEXT 1
+/** All-context including globals invalidation. */
+#define X86_INVPCID_TYPE_ALL_CONTEXT_INCL_GLOBAL 2
+/** All-context excluding globals invalidation. */
+#define X86_INVPCID_TYPE_ALL_CONTEXT_EXCL_GLOBAL 3
+/** The maximum valid invalidation type value. */
+#define X86_INVPCID_TYPE_MAX_VALID X86_INVPCID_TYPE_ALL_CONTEXT_EXCL_GLOBAL
+
/**
* 32-bit protected mode FSTENV image.
*/
@@ -46,7 +46,7 @@
typedef struct RTR0SEMLNXWAIT
{
/** The wait queue entry. */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 14) /* 4.13.0 and openSUSE */
wait_queue_entry_t WaitQE;
#else
wait_queue_t WaitQE;
@@ -1 +1 @@
-#define VBOX_SVN_REV 120293
+#define VBOX_SVN_REV 123745
@@ -3,9 +3,9 @@
#define VBOX_VERSION_MAJOR 5
#define VBOX_VERSION_MINOR 2
-#define VBOX_VERSION_BUILD 6
-#define VBOX_VERSION_STRING_RAW "5.2.6"
-#define VBOX_VERSION_STRING "5.2.6_KernelUbuntu"
+#define VBOX_VERSION_BUILD 18
+#define VBOX_VERSION_STRING_RAW "5.2.18"
+#define VBOX_VERSION_STRING "5.2.18_KernelUbuntu"
#define VBOX_API_VERSION_STRING "5_2"
#define VBOX_PRIVATE_BUILD_DESC "Private build by buildd"
@@ -200,6 +200,9 @@ typedef VBGLIOCHGCMCALL const RT_FAR *PCVBGLIOCHGCMCALL;
*/
# define VBGL_HGCM_HDR_INIT_TIMED(a_pHdr, a_idClient, a_idFunction, a_cParameters, a_cMsTimeout) \
do { \
+ VBGLREQHDR_INIT_EX(&(a_pHdr)->Hdr, \
+ sizeof(VBGLIOCHGCMCALL) + (a_cParameters) * sizeof(HGCMFunctionParameter), \
+ sizeof(VBGLIOCHGCMCALL) + (a_cParameters) * sizeof(HGCMFunctionParameter)); \
(a_pHdr)->u32ClientID = (a_idClient); \
(a_pHdr)->u32Function = (a_idFunction); \
(a_pHdr)->cMsTimeout = (a_cMsTimeout); \
@@ -29,7 +29,7 @@
#include <iprt/cdefs.h>
-/** @defgroup VBox Common Defintions and Macros
+/** @defgroup grp_vbox_cdefs VBox Common Definitions and Macros
* @{
*/
@@ -52,6 +52,23 @@
# endif
#endif
+/** @def VBOX_STRICT_GUEST
+ * Be strict on guest input. This can be overridden on the compiler command line
+ * or per source file by defining VBOX_NO_STRICT_GUEST.
+ *
+ * @sa VBox/assert.h and its ASSERT_GUEST_XXXX macros.
+ */
+#ifndef VBOX_STRICT_GUEST
+# ifdef VBOX_STRICT
+# define VBOX_STRICT_GUEST
+# endif
+#endif
+/** @def VBOX_NO_STRICT_GUEST
+ * Define to override VBOX_STRICT_GUEST, disabling asserting on guest input. */
+#ifdef VBOX_NO_STRICT_GUEST
+# undef VBOX_STRICT_GUEST
+#endif
+
/*
* Shut up DOXYGEN warnings and guide it properly thru the code.
@@ -59,6 +76,8 @@
#ifdef DOXYGEN_RUNNING
#define VBOX_WITH_STATISTICS
#define VBOX_STRICT
+#define VBOX_STRICT_GUEST
+#define VBOX_NO_STRICT_GUEST
#define IN_DBG
#define IN_DIS
#define IN_INTNET_R0
@@ -1863,6 +1863,9 @@
#define VERR_SUPDRV_NOT_BUDDING_VM_PROCESS_1 (-3748)
/** The process trying to open VBoxDrv is not a budding VM process (2). */
#define VERR_SUPDRV_NOT_BUDDING_VM_PROCESS_2 (-3749)
+
+/** Raw-mode is unavailable courtesy of Hyper-V. */
+#define VERR_SUPDRV_NO_RAW_MODE_HYPER_V_ROOT (-7000)
/** @} */
@@ -101,6 +101,8 @@ typedef enum LOGGROUP
LOG_GROUP_DEV_EHCI,
/** Floppy Controller Device group. */
LOG_GROUP_DEV_FDC,
+ /** Flash Device group. */
+ LOG_GROUP_DEV_FLASH,
/** Guest Interface Manager Device group. */
LOG_GROUP_DEV_GIM,
/** HDA Device group. */
@@ -849,6 +851,7 @@ typedef enum LOGGROUP
"DEV_EFI", \
"DEV_EHCI", \
"DEV_FDC", \
+ "DEV_FLASH", \
"DEV_GIM", \
"DEV_HDA", \
"DEV_HDA_CODEC", \
@@ -483,6 +483,8 @@ DECLINLINE(void) vbfsCopyFsObjInfoFromIprt(PSHFLFSOBJINFO pDst, PCRTFSOBJINFO pS
pDst->ChangeTime = pSrc->ChangeTime;
pDst->BirthTime = pSrc->BirthTime;
pDst->Attr.fMode = pSrc->Attr.fMode;
+ /* Clear bits which we don't pass through for security reasons. */
+ pDst->Attr.fMode &= ~(RTFS_UNIX_ISUID | RTFS_UNIX_ISGID | RTFS_UNIX_ISTXT);
RT_ZERO(pDst->Attr.u);
switch (pSrc->Attr.enmAdditional)
{
@@ -1509,8 +1509,12 @@ DECLINLINE(void) ASMSerializeInstructionRdTscp(void)
*/
#if (defined(RT_ARCH_X86) && ARCH_BITS == 16) || defined(IN_GUEST)
# define ASMSerializeInstruction() ASMSerializeInstructionIRet()
-#else
+#elif defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
# define ASMSerializeInstruction() ASMSerializeInstructionCpuId()
+#elif defined(RT_ARCH_SPARC64)
+RTDECL(void) ASMSerializeInstruction(void);
+#else
+# error "Port me"
#endif
@@ -1519,8 +1523,20 @@ DECLINLINE(void) ASMSerializeInstructionRdTscp(void)
*/
DECLINLINE(void) ASMMemoryFence(void)
{
- /** @todo use mfence? check if all cpus we care for support it. */
-#if ARCH_BITS == 16
+#if defined(RT_ARCH_AMD64) || (defined(RT_ARCH_X86) && !defined(RT_WITH_OLD_CPU_SUPPORT))
+# if RT_INLINE_ASM_GNU_STYLE
+ __asm__ __volatile__ (".byte 0x0f,0xae,0xf0\n\t");
+# elif RT_INLINE_ASM_USES_INTRIN
+ _mm_mfence();
+# else
+ __asm
+ {
+ _emit 0x0f
+ _emit 0xae
+ _emit 0xf0
+ }
+# endif
+#elif ARCH_BITS == 16
uint16_t volatile u16;
ASMAtomicXchgU16(&u16, 0);
#else
@@ -1535,8 +1551,22 @@ DECLINLINE(void) ASMMemoryFence(void)
*/
DECLINLINE(void) ASMWriteFence(void)
{
- /** @todo use sfence? check if all cpus we care for support it. */
+#if defined(RT_ARCH_AMD64) || (defined(RT_ARCH_X86) && !defined(RT_WITH_OLD_CPU_SUPPORT))
+# if RT_INLINE_ASM_GNU_STYLE
+ __asm__ __volatile__ (".byte 0x0f,0xae,0xf8\n\t");
+# elif RT_INLINE_ASM_USES_INTRIN
+ _mm_sfence();
+# else
+ __asm
+ {
+ _emit 0x0f
+ _emit 0xae
+ _emit 0xf8
+ }
+# endif
+#else
ASMMemoryFence();
+#endif
}
@@ -1545,8 +1575,22 @@ DECLINLINE(void) ASMWriteFence(void)
*/
DECLINLINE(void) ASMReadFence(void)
{
- /** @todo use lfence? check if all cpus we care for support it. */
+#if defined(RT_ARCH_AMD64) || (defined(RT_ARCH_X86) && !defined(RT_WITH_OLD_CPU_SUPPORT))
+# if RT_INLINE_ASM_GNU_STYLE
+ __asm__ __volatile__ (".byte 0x0f,0xae,0xe8\n\t");
+# elif RT_INLINE_ASM_USES_INTRIN
+ _mm_lfence();
+# else
+ __asm
+ {
+ _emit 0x0f
+ _emit 0xae
+ _emit 0xe8
+ }
+# endif
+#else
ASMMemoryFence();
+#endif
}
@@ -3897,7 +3941,9 @@ DECLINLINE(void) ASMMemFill32(volatile void RT_FAR *pv, size_t cb, uint32_t u32)
*
* @todo Fix name, it is a predicate function but it's not returning boolean!
*/
-#if !defined(RDESKTOP) && (!defined(RT_OS_LINUX) || !defined(__KERNEL__))
+#if !defined(RDESKTOP) && (!defined(RT_OS_LINUX) || !defined(__KERNEL__)) \
+ && !defined(RT_ARCH_SPARC64) \
+ && !defined(RT_ARCH_SPARC)
DECLASM(void RT_FAR *) ASMMemFirstNonZero(void const RT_FAR *pv, size_t cb);
#else
DECLINLINE(void RT_FAR *) ASMMemFirstNonZero(void const RT_FAR *pv, size_t cb)
@@ -4004,7 +4050,9 @@ DECLINLINE(bool) ASMMemIsZeroPage(void const RT_FAR *pvPage)
* @remarks No alignment requirements.
*/
#if (!defined(RT_OS_LINUX) || !defined(__KERNEL__)) \
- && (!defined(RT_OS_FREEBSD) || !defined(_KERNEL))
+ && (!defined(RT_OS_FREEBSD) || !defined(_KERNEL)) \
+ && !defined(RT_ARCH_SPARC64) \
+ && !defined(RT_ARCH_SPARC)
DECLASM(void *) ASMMemFirstMismatchingU8(void const RT_FAR *pv, size_t cb, uint8_t u8);
#else
DECLINLINE(void *) ASMMemFirstMismatchingU8(void const RT_FAR *pv, size_t cb, uint8_t u8)
@@ -1572,6 +1572,67 @@
#endif
+/** @name Untrusted data classifications.
+ * @{ */
+/** @def RT_UNTRUSTED_USER
+ * For marking non-volatile (race free) data from user mode as untrusted.
+ * This is just for visible documentation. */
+#define RT_UNTRUSTED_USER
+/** @def RT_UNTRUSTED_VOLATILE_USER
+ * For marking volatile data shared with user mode as untrusted.
+ * This is more than just documentation as it specifies the 'volatile' keyword,
+ * because the guest could modify the data at any time. */
+#define RT_UNTRUSTED_VOLATILE_USER volatile
+
+/** @def RT_UNTRUSTED_GUEST
+ * For marking non-volatile (race free) data from the guest as untrusted.
+ * This is just for visible documentation. */
+#define RT_UNTRUSTED_GUEST
+/** @def RT_UNTRUSTED_VOLATILE_GUEST
+ * For marking volatile data shared with the guest as untrusted.
+ * This is more than just documentation as it specifies the 'volatile' keyword,
+ * because the guest could modify the data at any time. */
+#define RT_UNTRUSTED_VOLATILE_GUEST volatile
+
+/** @def RT_UNTRUSTED_HOST
+ * For marking non-volatile (race free) data from the host as untrusted.
+ * This is just for visible documentation. */
+#define RT_UNTRUSTED_HOST
+/** @def RT_UNTRUSTED_VOLATILE_HOST
+ * For marking volatile data shared with the host as untrusted.
+ * This is more than just documentation as it specifies the 'volatile' keyword,
+ * because the host could modify the data at any time. */
+#define RT_UNTRUSTED_VOLATILE_HOST volatile
+
+/** @def RT_UNTRUSTED_HSTGST
+ * For marking non-volatile (race free) data from the host/guest as untrusted.
+ * This is just for visible documentation. */
+#define RT_UNTRUSTED_HSTGST
+/** @def RT_UNTRUSTED_VOLATILE_HSTGST
+ * For marking volatile data shared with the host/guest as untrusted.
+ * This is more than just documentation as it specifies the 'volatile' keyword,
+ * because the host could modify the data at any time. */
+#define RT_UNTRUSTED_VOLATILE_HSTGST volatile
+/** @} */
+
+/** @name Fences for use when handling untrusted data.
+ * @{ */
+/** For use after copying untrusted volatile data to a non-volatile location.
+ * This translates to a compiler memory barrier and will help ensure that the
+ * compiler uses the non-volatile copy of the data. */
+#define RT_UNTRUSTED_NONVOLATILE_COPY_FENCE() ASMCompilerBarrier()
+/** For use after finished validating guest input.
+ * What this translates to is architecture dependent. On intel it will
+ * translate to a CPU load+store fence as well as a compiler memory barrier. */
+#if defined(RT_ARCH_AMD64) || (defined(RT_ARCH_X86) && !defined(RT_WITH_OLD_CPU_SUPPORT))
+# define RT_UNTRUSTED_VALIDATED_FENCE() do { ASMCompilerBarrier(); ASMReadFence(); } while (0)
+#elif defined(RT_ARCH_X86)
+# define RT_UNTRUSTED_VALIDATED_FENCE() do { ASMCompilerBarrier(); ASMMemoryFence(); } while (0)
+#else
+# define RT_UNTRUSTED_VALIDATED_FENCE() do { ASMCompilerBarrier(); } while (0)
+#endif
+/** @} */
+
/** @def RT_LIKELY
* Give the compiler a hint that an expression is very likely to hold true.
@@ -2352,6 +2352,8 @@ RT_C_DECLS_END
#define VERR_ASN1_TOO_DEEPLY_NESTED (-22855)
/** Generic unexpected object ID error. */
#define VERR_ASN1_UNEXPECTED_OBJ_ID (-22856)
+/** Invalid ASN.1 INTEGER encoding. */
+#define VERR_ASN1_INVALID_INTEGER_ENCODING (-22857)
/** ANS.1 internal error 1. */
#define VERR_ASN1_INTERNAL_ERROR_1 (-22895)
@@ -2223,6 +2223,8 @@
# define RTThreadWait RT_MANGLER(RTThreadWait)
# define RTThreadWaitNoResume RT_MANGLER(RTThreadWaitNoResume)
# define RTThreadYield RT_MANGLER(RTThreadYield)
+# define RTTimeCompare RT_MANGLER(RTTimeCompare)
+# define RTTimeConvertToZulu RT_MANGLER(RTTimeConvertToZulu)
# define RTTimeDbgBad RT_MANGLER(RTTimeDbgBad)
# define RTTimeDbgExpired RT_MANGLER(RTTimeDbgExpired)
# define RTTimeDbgRaces RT_MANGLER(RTTimeDbgRaces)
@@ -2232,6 +2234,7 @@
# define RTTimeIsLeapYear RT_MANGLER(RTTimeIsLeapYear)
# define RTTimeLocalDeltaNano RT_MANGLER(RTTimeLocalDeltaNano)
# define RTTimeLocalExplode RT_MANGLER(RTTimeLocalExplode)
+# define RTTimeLocalNormalize RT_MANGLER(RTTimeLocalNormalize)
# define RTTimeLocalNow RT_MANGLER(RTTimeLocalNow)
# define RTTimeMilliTS RT_MANGLER(RTTimeMilliTS)
# define RTTimeNanoTS RT_MANGLER(RTTimeNanoTS)
@@ -2336,6 +2339,31 @@
# define RTTraceBufRelease RT_MANGLER(RTTraceBufRelease)
# define RTTraceBufRetain RT_MANGLER(RTTraceBufRetain)
# define RTTraceGetDefaultBuf RT_MANGLER(RTTraceGetDefaultBuf)
+# define RTTraceLogRdrCreate RT_MANGLER(RTTraceLogRdrCreate)
+# define RTTraceLogRdrCreateFromFile RT_MANGLER(RTTraceLogRdrCreateFromFile)
+# define RTTraceLogRdrDestroy RT_MANGLER(RTTraceLogRdrDestroy)
+# define RTTraceLogRdrEvtFillVals RT_MANGLER(RTTraceLogRdrEvtFillVals)
+# define RTTraceLogRdrEvtGetDesc RT_MANGLER(RTTraceLogRdrEvtGetDesc)
+# define RTTraceLogRdrEvtGetSeqNo RT_MANGLER(RTTraceLogRdrEvtGetSeqNo)
+# define RTTraceLogRdrEvtGetTs RT_MANGLER(RTTraceLogRdrEvtGetTs)
+# define RTTraceLogRdrEvtIsGrouped RT_MANGLER(RTTraceLogRdrEvtIsGrouped)
+# define RTTraceLogRdrEvtPoll RT_MANGLER(RTTraceLogRdrEvtPoll)
+# define RTTraceLogRdrEvtQueryVal RT_MANGLER(RTTraceLogRdrEvtQueryVal)
+# define RTTraceLogRdrIteratorFree RT_MANGLER(RTTraceLogRdrIteratorFree)
+# define RTTraceLogRdrIteratorNext RT_MANGLER(RTTraceLogRdrIteratorNext)
+# define RTTraceLogRdrIteratorQueryEvent RT_MANGLER(RTTraceLogRdrIteratorQueryEvent)
+# define RTTraceLogRdrQueryIterator RT_MANGLER(RTTraceLogRdrQueryIterator)
+# define RTTraceLogRdrQueryLastEvt RT_MANGLER(RTTraceLogRdrQueryLastEvt)
+# define RTTraceLogWrAddEvtDesc RT_MANGLER(RTTraceLogWrAddEvtDesc)
+# define RTTraceLogWrCreate RT_MANGLER(RTTraceLogWrCreate)
+# define RTTraceLogWrCreateFile RT_MANGLER(RTTraceLogWrCreateFile)
+# define RTTraceLogWrCreateTcpClient RT_MANGLER(RTTraceLogWrCreateTcpClient)
+# define RTTraceLogWrCreateTcpServer RT_MANGLER(RTTraceLogWrCreateTcpServer)
+# define RTTraceLogWrDestroy RT_MANGLER(RTTraceLogWrDestroy)
+# define RTTraceLogWrEvtAdd RT_MANGLER(RTTraceLogWrEvtAdd)
+# define RTTraceLogWrEvtAddL RT_MANGLER(RTTraceLogWrEvtAddL)
+# define RTTraceLogWrEvtAddLV RT_MANGLER(RTTraceLogWrEvtAddLV)
+# define RTTraceLogWrEvtAddSg RT_MANGLER(RTTraceLogWrEvtAddSg)
# define RTTraceSetDefaultBuf RT_MANGLER(RTTraceSetDefaultBuf)
# define RTUdpCreateClientSocket RT_MANGLER(RTUdpCreateClientSocket)
# define RTUdpRead RT_MANGLER(RTUdpRead)
@@ -142,6 +142,31 @@ RT_C_DECLS_END
#define RT_BZERO(pv, cb) do { memset((pv), 0, cb); } while (0)
+/**
+ * For copying a volatile variable to a non-volatile one.
+ * @param a_Dst The non-volatile destination variable.
+ * @param a_VolatileSrc The volatile source variable / dereferenced pointer.
+ */
+#define RT_COPY_VOLATILE(a_Dst, a_VolatileSrc) \
+ do { \
+ void const volatile *a_pvVolatileSrc_BCopy_Volatile = &(a_VolatileSrc); \
+ AssertCompile(sizeof(a_Dst) == sizeof(a_VolatileSrc)); \
+ memcpy(&(a_Dst), (void const *)a_pvVolatileSrc_BCopy_Volatile, sizeof(a_Dst)); \
+ } while (0)
+
+/**
+ * For copying a number of bytes from a volatile buffer to a non-volatile one.
+ *
+ * @param a_pDst Pointer to the destination buffer.
+ * @param a_pVolatileSrc Pointer to the volatile source buffer.
+ * @param a_cbToCopy Number of bytes to copy.
+ */
+#define RT_BCOPY_VOLATILE(a_pDst, a_pVolatileSrc, a_cbToCopy) \
+ do { \
+ void const volatile *a_pvVolatileSrc_BCopy_Volatile = (a_pVolatileSrc); \
+ memcpy((a_pDst), (void const *)a_pvVolatileSrc_BCopy_Volatile, (a_cbToCopy)); \
+ } while (0)
+
/** @defgroup grp_rt_str RTStr - String Manipulation
* Mostly UTF-8 related helpers where the standard string functions won't do.
@@ -609,8 +609,7 @@ typedef struct RTTIME
uint32_t u32Nanosecond;
/** Flags, of the RTTIME_FLAGS_* \#defines. */
uint32_t fFlags;
- /** UCT time offset in minutes (-840-840).
- * @remarks The implementation of RTTimeLocal* isn't quite there yet, so this might not be 100% correct. */
+ /** UTC time offset in minutes (-840-840). */
int32_t offUTC;
} RTTIME;
#pragma pack()
@@ -752,6 +751,16 @@ RTDECL(PRTTIME) RTTimeLocalExplode(PRTTIME pTime, PCRTTIMESPEC pTimeSpec);
*/
RTDECL(PRTTIME) RTTimeLocalNormalize(PRTTIME pTime);
+/**
+ * Converts a time structure to UTC, relying on UTC offset information
+ * if it contains local time.
+ *
+ * @returns pTime on success.
+ * @returns NULL if the data is invalid.
+ * @param pTime The time structure to convert.
+ */
+RTDECL(PRTTIME) RTTimeConvertToZulu(PRTTIME pTime);
+
/**
* Converts a time spec to a ISO date string.
*
@@ -785,6 +794,21 @@ RTDECL(PRTTIME) RTTimeFromString(PRTTIME pTime, const char *pszString);
*/
RTDECL(bool) RTTimeIsLeapYear(int32_t i32Year);
+/**
+ * Compares two normalized time structures.
+ *
+ * @retval 0 if equal.
+ * @retval -1 if @a pLeft is earlier than @a pRight.
+ * @retval 1 if @a pRight is earlier than @a pLeft.
+ *
+ * @param pLeft The left side time. NULL is accepted.
+ * @param pRight The right side time. NULL is accepted.
+ *
+ * @note A NULL time is considered smaller than anything else. If both are
+ * NULL, they are considered equal.
+ */
+RTDECL(int) RTTimeCompare(PCRTTIME pLeft, PCRTTIME pRight);
+
/**
* Gets the current nanosecond timestamp.
*
@@ -1 +1 @@
-#define VBOX_SVN_REV 120293
+#define VBOX_SVN_REV 123745
@@ -50,7 +50,11 @@ static void sf_timespec_from_ftime(RTTIMESPEC *ts, time_t *time)
RTTimeSpecSetNano(ts, t);
}
#else /* >= 2.6.0 */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0)
static void sf_ftime_from_timespec(struct timespec *tv, RTTIMESPEC *ts)
+#else
+static void sf_ftime_from_timespec(struct timespec64 *tv, RTTIMESPEC *ts)
+#endif
{
int64_t t = RTTimeSpecGetNano(ts);
int64_t nsec;
@@ -60,7 +64,11 @@ static void sf_ftime_from_timespec(struct timespec *tv, RTTIMESPEC *ts)
tv->tv_nsec = nsec;
}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0)
static void sf_timespec_from_ftime(RTTIMESPEC *ts, struct timespec *tv)
+#else
+static void sf_timespec_from_ftime(RTTIMESPEC *ts, struct timespec64 *tv)
+#endif
{
int64_t t = (int64_t)tv->tv_nsec + (int64_t)tv->tv_sec * 1000000000;
RTTimeSpecSetNano(ts, t);
@@ -79,10 +87,7 @@ void sf_init_inode(struct sf_glob_info *sf_g, struct inode *inode,
attr = &info->Attr;
#define mode_set(r) attr->fMode & (RTFS_UNIX_##r) ? (S_##r) : 0;
- mode = mode_set(ISUID);
- mode |= mode_set(ISGID);
-
- mode |= mode_set(IRUSR);
+ mode = mode_set(IRUSR);
mode |= mode_set(IWUSR);
mode |= mode_set(IXUSR);
@@ -360,9 +365,7 @@ int sf_setattr(struct dentry *dentry, struct iattr *iattr)
RT_ZERO(info);
if (iattr->ia_valid & ATTR_MODE)
{
- info.Attr.fMode = mode_set(ISUID);
- info.Attr.fMode |= mode_set(ISGID);
- info.Attr.fMode |= mode_set(IRUSR);
+ info.Attr.fMode = mode_set(IRUSR);
info.Attr.fMode |= mode_set(IWUSR);
info.Attr.fMode |= mode_set(IXUSR);
info.Attr.fMode |= mode_set(IRGRP);
@@ -3,9 +3,9 @@
#define VBOX_VERSION_MAJOR 5
#define VBOX_VERSION_MINOR 2
-#define VBOX_VERSION_BUILD 6
-#define VBOX_VERSION_STRING_RAW "5.2.6"
-#define VBOX_VERSION_STRING "5.2.6_KernelUbuntu"
+#define VBOX_VERSION_BUILD 18
+#define VBOX_VERSION_STRING_RAW "5.2.18"
+#define VBOX_VERSION_STRING "5.2.18_KernelUbuntu"
#define VBOX_API_VERSION_STRING "5_2"
#define VBOX_PRIVATE_BUILD_DESC "Private build by buildd"
@@ -54,10 +54,10 @@ bool VBoxHGSMIIsSupported(void)
*/
int hgsmi_report_flags_location(struct gen_pool * ctx, u32 location)
{
- struct hgsmi_buffer_location *p;
/* Allocate the IO buffer. */
- p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_HGSMI,
+ struct hgsmi_buffer_location *p =
+ (struct hgsmi_buffer_location *)hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_HGSMI,
HGSMI_CC_HOST_FLAGS_LOCATION);
if (!p)
return VERR_NO_MEMORY;
@@ -84,10 +84,10 @@ int hgsmi_report_flags_location(struct gen_pool * ctx, u32 location)
*/
int hgsmi_send_caps_info(struct gen_pool * ctx, u32 caps)
{
- struct vbva_caps *p;
/* Allocate the IO buffer. */
- p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_INFO_CAPS);
+ struct vbva_caps *p =
+ (struct vbva_caps *)hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_INFO_CAPS);
if (!p)
return VERR_NO_MEMORY;
@@ -154,8 +154,7 @@ int hgsmi_query_conf(struct gen_pool * ctx, u32 index, u32 *value_ret)
struct vbva_conf32 *p;
/* Allocate the IO buffer. */
- p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
- VBVA_QUERY_CONF32);
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_QUERY_CONF32);
if (!p)
return VERR_NO_MEMORY;
@@ -33,16 +33,16 @@
#define HGSMIOFFSET_VOID ((u32)~0)
-/* Describes a shared memory area buffer.
+/**
+ * Describes a shared memory area buffer.
+ *
* Used for calculations with offsets and for buffers verification.
*/
typedef struct HGSMIAREA {
- u8 *pu8Base; /* The starting address of the area. Corresponds to offset 'offBase'. */
- u32 offBase; /* The starting offset of the area. */
- u32 offLast; /* The last valid offset:
- * offBase + cbArea - 1 - (sizeof(header) + sizeof(tail)).
- */
- u32 cbArea; /* Size of the area. */
+ u8 *pu8Base; /**< The starting address of the area. Corresponds to offset 'offBase'. */
+ u32 offBase; /**< The starting offset of the area. */
+ u32 offLast; /**< The last valid offset: offBase + cbArea - 1 - (sizeof(header) + sizeof(tail)). */
+ u32 cbArea; /**< Size of the area. */
} HGSMIAREA;
@@ -102,14 +102,14 @@ int VBoxHGSMISendViewInfo(struct gen_pool * ctx,
{
int rc;
/* Issue the screen info command. */
- void *p = hgsmi_buffer_alloc(ctx, sizeof(VBVAINFOVIEW) * u32Count,
+ VBVAINFOVIEW *pInfo =
+ (VBVAINFOVIEW *)hgsmi_buffer_alloc(ctx, sizeof(VBVAINFOVIEW) * u32Count,
HGSMI_CH_VBVA, VBVA_INFO_VIEW);
- if (p) {
- VBVAINFOVIEW *pInfo = (VBVAINFOVIEW *)p;
- rc = pfnFill(pvData, pInfo, u32Count);
+ if (pInfo) {
+ rc = pfnFill(pvData, (VBVAINFOVIEW *)pInfo /* lazy bird */, u32Count);
if (RT_SUCCESS(rc))
- hgsmi_buffer_submit (ctx, p);
- hgsmi_buffer_free(ctx, p);
+ hgsmi_buffer_submit(ctx, pInfo);
+ hgsmi_buffer_free(ctx, pInfo);
} else
rc = VERR_NO_MEMORY;
return rc;
@@ -238,15 +238,10 @@ void hgsmi_process_display_info(struct gen_pool * ctx,
u16 flags)
{
/* Issue the screen info command. */
- void *p = hgsmi_buffer_alloc(ctx,
- sizeof (VBVAINFOSCREEN),
- HGSMI_CH_VBVA,
- VBVA_INFO_SCREEN);
- if (!p) {
- // LogFunc(("HGSMIHeapAlloc failed\n"));
- } else {
- VBVAINFOSCREEN *pScreen = (VBVAINFOSCREEN *)p;
-
+ VBVAINFOSCREEN *pScreen =
+ (VBVAINFOSCREEN *)hgsmi_buffer_alloc(ctx, sizeof(VBVAINFOSCREEN),
+ HGSMI_CH_VBVA, VBVA_INFO_SCREEN);
+ if (pScreen != NULL) {
pScreen->view_index = display;
pScreen->origin_x = origin_x;
pScreen->origin_y = origin_y;
@@ -257,9 +252,11 @@ void hgsmi_process_display_info(struct gen_pool * ctx,
pScreen->bits_per_pixel = bpp;
pScreen->flags = flags;
- hgsmi_buffer_submit(ctx, p);
+ hgsmi_buffer_submit(ctx, pScreen);
- hgsmi_buffer_free(ctx, p);
+ hgsmi_buffer_free(ctx, pScreen);
+ } else {
+ // LogFunc(("HGSMIHeapAlloc failed\n"));
}
}
@@ -279,7 +276,7 @@ void hgsmi_process_display_info(struct gen_pool * ctx,
int hgsmi_update_input_mapping(struct gen_pool * ctx, s32 origin_x, s32 origin_y,
u32 width, u32 height)
{
- int rc = VINF_SUCCESS;
+ int rc;
struct vbva_report_input_mapping *p;
// Log(("%s: origin_x=%d, origin_y=%d, width=%u, height=%u\n", __PRETTY_FUNCTION__, (int)origin_x, (int)origin_x,
// (unsigned)width, (unsigned)height));
@@ -316,32 +313,27 @@ int hgsmi_get_mode_hints(struct gen_pool * ctx,
unsigned screens, struct vbva_modehint *hints)
{
int rc;
- void *p;
+ struct vbva_query_mode_hints *pQuery;
- WARN_ON_ONCE(!((hints)));
- if (WARN_ON(!hints))
- return VERR_INVALID_POINTER;
-
- p = hgsmi_buffer_alloc(ctx, sizeof(struct vbva_query_mode_hints)
- + screens * sizeof(struct vbva_modehint),
+ assert_ptr_return(hints, VERR_INVALID_POINTER);
+ pQuery = (struct vbva_query_mode_hints *)hgsmi_buffer_alloc(ctx,
+ sizeof(struct vbva_query_mode_hints)
+ + screens * sizeof(struct vbva_modehint),
HGSMI_CH_VBVA, VBVA_QUERY_MODE_HINTS);
- if (!p) {
- // LogFunc(("HGSMIHeapAlloc failed\n"));
- return VERR_NO_MEMORY;
- } else {
- struct vbva_query_mode_hints *pQuery = p;
-
+ if (pQuery != NULL) {
pQuery->hints_queried_count = screens;
pQuery->cbHintStructureGuest = sizeof(struct vbva_modehint);
pQuery->rc = VERR_NOT_SUPPORTED;
- hgsmi_buffer_submit(ctx, p);
+ hgsmi_buffer_submit(ctx, pQuery);
rc = pQuery->rc;
if (RT_SUCCESS(rc))
- memcpy(hints, ((u8 *)p) + sizeof(struct vbva_query_mode_hints),
- screens * sizeof(struct vbva_modehint));
+ memcpy(hints, (void *)(pQuery + 1), screens * sizeof(struct vbva_modehint));
- hgsmi_buffer_free(ctx, p);
+ hgsmi_buffer_free(ctx, pQuery);
+ } else {
+ // LogFunc(("HGSMIHeapAlloc failed\n"));
+ rc = VERR_NO_MEMORY;
}
return rc;
}
@@ -1 +1 @@
-#define VBOX_SVN_REV 120293
+#define VBOX_SVN_REV 123745
@@ -267,7 +267,7 @@ static struct drm_driver driver = {
.master_set = vbox_master_set,
.master_drop = vbox_master_drop,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) || defined(RHEL_73)
-# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
+# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) && !defined(RHEL_75)
.set_busid = drm_pci_set_busid,
# endif
#endif
@@ -44,12 +44,33 @@
#include <linux/string.h>
#if defined(RHEL_MAJOR) && defined(RHEL_MINOR)
+# if RHEL_MAJOR == 7 && RHEL_MINOR >= 5
+# define RHEL_75
+# endif
# if RHEL_MAJOR == 7 && RHEL_MINOR >= 4
-# define RHEL_73
# define RHEL_74
-# elif RHEL_MAJOR == 7 && RHEL_MINOR >= 3
+# endif
+# if RHEL_MAJOR == 7 && RHEL_MINOR >= 3
# define RHEL_73
# endif
+# if RHEL_MAJOR == 7 && RHEL_MINOR >= 1
+# define RHEL_71
+# endif
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) || defined(RHEL_71)
+#define U8_MAX ((u8)~0U)
+#define S8_MAX ((s8)(U8_MAX>>1))
+#define S8_MIN ((s8)(-S8_MAX - 1))
+#define U16_MAX ((u16)~0U)
+#define S16_MAX ((s16)(U16_MAX>>1))
+#define S16_MIN ((s16)(-S16_MAX - 1))
+#define U32_MAX ((u32)~0U)
+#define S32_MAX ((s32)(U32_MAX>>1))
+#define S32_MIN ((s32)(-S32_MAX - 1))
+#define U64_MAX ((u64)~0ULL)
+#define S64_MAX ((s64)(U64_MAX>>1))
+#define S64_MIN ((s64)(-S64_MAX - 1))
#endif
#include <drm/drmP.h>
@@ -57,7 +78,7 @@
#include <drm/drm_gem.h>
#endif
#include <drm/drm_fb_helper.h>
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) || defined(RHEL_75)
#include <drm/drm_encoder.h>
#endif
@@ -154,7 +175,7 @@ struct vbox_private {
#undef CURSOR_DATA_SIZE
int vbox_driver_load(struct drm_device *dev, unsigned long flags);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) || defined(RHEL_75)
void vbox_driver_unload(struct drm_device *dev);
#else
int vbox_driver_unload(struct drm_device *dev);
@@ -59,6 +59,8 @@ extern int vbox_assert_var[1];
vbox_assert_var[(expr) ? 1 : 0] __attribute__((__unused__))
#define assert_compile_size(type, size) \
assert_compile(sizeof(type) == (size))
+#define assert_ptr_return(ptr,ret) \
+ do { if (unlikely(!(ptr))) { WARN_ON_ONCE(!(ptr)); return ret; } } while (0)
/** @} */
@@ -312,7 +312,7 @@ static int vboxfb_create(struct drm_fb_helper *helper,
info->fix.smem_start = 0;
info->fix.smem_len = size;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) || defined(RHEL_75)
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
#else
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
@@ -395,7 +395,7 @@ int vbox_fbdev_init(struct drm_device *dev)
#else
drm_fb_helper_prepare(dev, &fbdev->helper, &vbox_fb_helper_funcs);
#endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) || defined(RHEL_75)
ret = drm_fb_helper_init(dev, &fbdev->helper, vbox->num_crtcs);
#else
ret =
@@ -167,7 +167,7 @@ int vbox_framebuffer_init(struct drm_device *dev,
{
int ret;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) || defined(RHEL_75)
drm_helper_mode_fill_fb_struct(dev, &vbox_fb->base, mode_cmd);
#else
drm_helper_mode_fill_fb_struct(&vbox_fb->base, mode_cmd);
@@ -399,7 +399,7 @@ int vbox_driver_load(struct drm_device *dev, unsigned long flags)
return ret;
}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) || defined(RHEL_75)
void vbox_driver_unload(struct drm_device *dev)
#else
int vbox_driver_unload(struct drm_device *dev)
@@ -421,7 +421,7 @@ int vbox_driver_unload(struct drm_device *dev)
pci_iounmap(dev->pdev, vbox->guest_heap);
kfree(vbox);
dev->dev_private = NULL;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) && !defined(RHEL_75)
return 0;
#endif
}
@@ -64,7 +64,7 @@ static void vbox_do_modeset(struct drm_crtc *crtc,
width = mode->hdisplay ? mode->hdisplay : 640;
height = mode->vdisplay ? mode->vdisplay : 480;
crtc_id = vbox_crtc->crtc_id;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) || defined(RHEL_75)
bpp = crtc->enabled ? CRTC_FB(crtc)->format->cpp[0] * 8 : 32;
pitch = crtc->enabled ? CRTC_FB(crtc)->pitches[0] : width * bpp / 8;
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)
@@ -88,7 +88,7 @@ static void vbox_do_modeset(struct drm_crtc *crtc,
vbox_crtc->fb_offset % (bpp / 8) == 0)
VBoxVideoSetModeRegisters(
width, height, pitch * 8 / bpp,
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) || defined(RHEL_75)
CRTC_FB(crtc)->format->cpp[0] * 8,
#else
CRTC_FB(crtc)->bits_per_pixel,
@@ -763,13 +763,6 @@ static int vbox_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
size_t data_size, mask_size;
bool src_isiomem;
- /*
- * Re-set this regularly as in 5.0.20 and earlier the information was
- * lost on save and restore.
- */
- hgsmi_update_input_mapping(vbox->guest_pool, 0, 0,
- vbox->input_mapping_width,
- vbox->input_mapping_height);
if (!handle) {
bool cursor_enabled = false;
struct drm_crtc *crtci;
@@ -819,8 +812,8 @@ static int vbox_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
*/
mask_size = ((width + 7) / 8 * height + 3) & ~3;
data_size = width * height * 4 + mask_size;
- vbox->cursor_hot_x = min_t(u32, max(hot_x, 0), width);
- vbox->cursor_hot_y = min_t(u32, max(hot_y, 0), height);
+ vbox->cursor_hot_x = hot_x;
+ vbox->cursor_hot_y = hot_y;
vbox->cursor_width = width;
vbox->cursor_height = height;
vbox->cursor_data_size = data_size;
@@ -870,42 +863,21 @@ static int vbox_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
static int vbox_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct vbox_private *vbox = crtc->dev->dev_private;
- u32 flags = VBOX_MOUSE_POINTER_VISIBLE |
- VBOX_MOUSE_POINTER_SHAPE | VBOX_MOUSE_POINTER_ALPHA;
s32 crtc_x =
vbox->single_framebuffer ? crtc->x : to_vbox_crtc(crtc)->x_hint;
s32 crtc_y =
vbox->single_framebuffer ? crtc->y : to_vbox_crtc(crtc)->y_hint;
- u32 host_x, host_y;
- u32 hot_x = 0;
- u32 hot_y = 0;
int rc;
- /*
- * We compare these to unsigned later and don't
- * need to handle negative.
- */
- if (x + crtc_x < 0 || y + crtc_y < 0 || vbox->cursor_data_size == 0)
+ x += vbox->cursor_hot_x;
+ y += vbox->cursor_hot_y;
+ if (x + crtc_x < 0 || y + crtc_y < 0 ||
+ x + crtc_x >= vbox->input_mapping_width ||
+ y + crtc_y >= vbox->input_mapping_width ||
+ vbox->cursor_data_size == 0)
return 0;
-
rc = hgsmi_cursor_position(vbox->guest_pool, true, x + crtc_x,
- y + crtc_y, &host_x, &host_y);
- /* Work around a bug after save and restore in 5.0.20 and earlier. */
- if (RT_FAILURE(rc) || (host_x == 0 && host_y == 0))
- return rc == VINF_SUCCESS ? 0
- : rc == VERR_NO_MEMORY ? -ENOMEM : -EINVAL;
- if (x + crtc_x < host_x)
- hot_x = min(host_x - x - crtc_x, vbox->cursor_width);
- if (y + crtc_y < host_y)
- hot_y = min(host_y - y - crtc_y, vbox->cursor_height);
- if (hot_x == vbox->cursor_hot_x && hot_y == vbox->cursor_hot_y)
- return 0;
- vbox->cursor_hot_x = hot_x;
- vbox->cursor_hot_y = hot_y;
- rc = hgsmi_update_pointer_shape(vbox->guest_pool, flags, hot_x, hot_y,
- vbox->cursor_width,
- vbox->cursor_height, vbox->cursor_data,
- vbox->cursor_data_size);
+ y + crtc_y, NULL, NULL);
return rc == VINF_SUCCESS ? 0 : rc == VERR_NO_MEMORY ? -ENOMEM : rc ==
- VERR_NOT_SUPPORTED ? -EBUSY : -EINVAL;
+ VERR_NOT_SUPPORTED ? -EBUSY : -EINVAL;
}
@@ -193,22 +193,6 @@ static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
{
}
-static int vbox_bo_move(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu, struct ttm_mem_reg *new_mem)
-{
- int r;
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) && !defined(RHEL_74)
- r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
-#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0) && !defined(RHEL_74)
- r = ttm_bo_move_memcpy(bo, evict, interruptible, no_wait_gpu, new_mem);
-#else
- r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
-#endif
- return r;
-}
-
static void vbox_ttm_backend_destroy(struct ttm_tt *tt)
{
ttm_tt_fini(tt);
@@ -219,10 +203,15 @@ static struct ttm_backend_func vbox_tt_backend_func = {
.destroy = &vbox_ttm_backend_destroy,
};
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)
static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
unsigned long size,
u32 page_flags,
struct page *dummy_read_page)
+#else
+static struct ttm_tt *vbox_ttm_tt_create(struct ttm_buffer_object *bo,
+ u32 page_flags)
+#endif
{
struct ttm_tt *tt;
@@ -231,7 +220,11 @@ static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
return NULL;
tt->func = &vbox_tt_backend_func;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)
if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+#else
+ if (ttm_tt_init(tt, bo, page_flags)) {
+#endif
kfree(tt);
return NULL;
}
@@ -239,36 +232,51 @@ static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
return tt;
}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)
+# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)
static int vbox_ttm_tt_populate(struct ttm_tt *ttm)
{
return ttm_pool_populate(ttm);
}
+# else
+static int vbox_ttm_tt_populate(struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
+{
+ return ttm_pool_populate(ttm, ctx);
+}
+# endif
static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
ttm_pool_unpopulate(ttm);
}
+#endif
struct ttm_bo_driver vbox_bo_driver = {
.ttm_tt_create = vbox_ttm_tt_create,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)
.ttm_tt_populate = vbox_ttm_tt_populate,
.ttm_tt_unpopulate = vbox_ttm_tt_unpopulate,
+#endif
.init_mem_type = vbox_bo_init_mem_type,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) || defined(RHEL_74)
.eviction_valuable = ttm_bo_eviction_valuable,
#endif
.evict_flags = vbox_bo_evict_flags,
- .move = vbox_bo_move,
.verify_access = vbox_bo_verify_access,
.io_mem_reserve = &vbox_ttm_io_mem_reserve,
.io_mem_free = &vbox_ttm_io_mem_free,
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) || defined(RHEL_75)
+# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)
.io_mem_pfn = ttm_bo_default_io_mem_pfn,
+# endif
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) \
|| defined(RHEL_74)
+# ifndef RHEL_75
.lru_tail = &ttm_bo_default_lru_tail,
.swap_lru_tail = &ttm_bo_default_swap_lru_tail,
+# endif
#endif
};
@@ -397,7 +405,11 @@ int vbox_bo_create(struct drm_device *dev, int size, int align,
ret = ttm_bo_init(&vbox->ttm.bdev, &vboxbo->bo, size,
ttm_bo_type_device, &vboxbo->placement,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)
align >> PAGE_SHIFT, false, NULL, acc_size,
+#else
+ align >> PAGE_SHIFT, false, acc_size,
+#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) || defined(RHEL_73)
NULL,
#endif
@@ -417,6 +429,9 @@ static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo)
int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0)
+ struct ttm_operation_ctx ctx = { false, false };
+#endif
int i, ret;
if (bo->pin_count) {
@@ -432,7 +447,11 @@ int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr)
for (i = 0; i < bo->placement.num_placement; i++)
PLACEMENT_FLAGS(bo->placements[i]) |= TTM_PL_FLAG_NO_EVICT;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)
ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+#else
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
+#endif
if (ret)
return ret;
@@ -446,6 +465,9 @@ int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr)
int vbox_bo_unpin(struct vbox_bo *bo)
{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0)
+ struct ttm_operation_ctx ctx = { false, false };
+#endif
int i, ret;
if (!bo->pin_count) {
@@ -459,7 +481,11 @@ int vbox_bo_unpin(struct vbox_bo *bo)
for (i = 0; i < bo->placement.num_placement; i++)
PLACEMENT_FLAGS(bo->placements[i]) &= ~TTM_PL_FLAG_NO_EVICT;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)
ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+#else
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
+#endif
if (ret)
return ret;
@@ -473,6 +499,9 @@ int vbox_bo_unpin(struct vbox_bo *bo)
*/
int vbox_bo_push_sysram(struct vbox_bo *bo)
{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0)
+ struct ttm_operation_ctx ctx = { false, false };
+#endif
int i, ret;
if (!bo->pin_count) {
@@ -491,7 +520,11 @@ int vbox_bo_push_sysram(struct vbox_bo *bo)
for (i = 0; i < bo->placement.num_placement; i++)
PLACEMENT_FLAGS(bo->placements[i]) |= TTM_PL_FLAG_NO_EVICT;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)
ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+#else
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
+#endif
if (ret) {
DRM_ERROR("pushing to VRAM failed\n");
return ret;
@@ -329,47 +329,53 @@ typedef enum {
, VBOXVHWACMD_TYPE_HH_SAVESTATE_LOADPERFORM
} VBOXVHWACMD_TYPE;
-/* the command processing was asynch, set by the host to indicate asynch command completion
- * must not be cleared once set, the command completion is performed by issuing a host->guest completion command
- * while keeping this flag unchanged */
-#define VBOXVHWACMD_FLAG_HG_ASYNCH 0x00010000
-/* asynch completion is performed by issuing the event */
-#define VBOXVHWACMD_FLAG_GH_ASYNCH_EVENT 0x00000001
-/* issue interrupt on asynch completion */
-#define VBOXVHWACMD_FLAG_GH_ASYNCH_IRQ 0x00000002
-/* guest does not do any op on completion of this command, the host may copy the command and indicate that it does not need the command anymore
+/** The command processing was asynch, set by the host to indicate asynch
+ * command completion. Must not be cleared once set, the command completion is
+ * performed by issuing a host->guest completion command while keeping this
+ * flag unchanged */
+#define VBOXVHWACMD_FLAG_HG_ASYNCH 0x00010000u
+/** asynch completion is performed by issuing the event */
+#define VBOXVHWACMD_FLAG_GH_ASYNCH_EVENT 0x00000001u
+/** issue interrupt on asynch completion */
+#define VBOXVHWACMD_FLAG_GH_ASYNCH_IRQ 0x00000002u
+/** Guest does not do any op on completion of this command, the host may copy
+ * the command and indicate that it does not need the command anymore
* by setting the VBOXVHWACMD_FLAG_HG_ASYNCH_RETURNED flag */
-#define VBOXVHWACMD_FLAG_GH_ASYNCH_NOCOMPLETION 0x00000004
-/* the host has copied the VBOXVHWACMD_FLAG_GH_ASYNCH_NOCOMPLETION command and returned it to the guest */
-#define VBOXVHWACMD_FLAG_HG_ASYNCH_RETURNED 0x00020000
-/* this is the host->host cmd, i.e. a configuration command posted by the host to the framebuffer */
-#define VBOXVHWACMD_FLAG_HH_CMD 0x10000000
+#define VBOXVHWACMD_FLAG_GH_ASYNCH_NOCOMPLETION 0x00000004u
+/** the host has copied the VBOXVHWACMD_FLAG_GH_ASYNCH_NOCOMPLETION command and returned it to the guest */
+#define VBOXVHWACMD_FLAG_HG_ASYNCH_RETURNED 0x00020000u
+/** this is the host->host cmd, i.e. a configuration command posted by the host to the framebuffer */
+#define VBOXVHWACMD_FLAG_HH_CMD 0x10000000u
typedef struct VBOXVHWACMD {
- VBOXVHWACMD_TYPE enmCmd; /* command type */
- volatile s32 rc; /* command result */
- s32 iDisplay; /* display index */
- volatile s32 Flags; /* ored VBOXVHWACMD_FLAG_xxx values */
- uint64_t GuestVBVAReserved1; /* field internally used by the guest VBVA cmd handling, must NOT be modified by clients */
- uint64_t GuestVBVAReserved2; /* field internally used by the guest VBVA cmd handling, must NOT be modified by clients */
+ VBOXVHWACMD_TYPE enmCmd; /**< command type */
+ volatile s32 rc; /**< command result */
+ s32 iDisplay; /**< display index */
+ volatile s32 Flags; /**< ORed VBOXVHWACMD_FLAG_xxx values */
+ uint64_t GuestVBVAReserved1; /**< field internally used by the guest VBVA cmd handling, must NOT be modified by clients */
+ uint64_t GuestVBVAReserved2; /**< field internally used by the guest VBVA cmd handling, must NOT be modified by clients */
volatile u32 cRefs;
s32 Reserved;
union {
struct VBOXVHWACMD *pNext;
- u32 offNext;
- uint64_t Data; /* the body is 64-bit aligned */
+ u32 offNext;
+ uint64_t Data; /**< the body is 64-bit aligned */
} u;
char body[1];
} VBOXVHWACMD;
-#define VBOXVHWACMD_HEADSIZE() (RT_OFFSETOF(VBOXVHWACMD, body))
-#define VBOXVHWACMD_SIZE_FROMBODYSIZE(_s) (VBOXVHWACMD_HEADSIZE() + (_s))
-#define VBOXVHWACMD_SIZE(_tCmd) (VBOXVHWACMD_SIZE_FROMBODYSIZE(sizeof(_tCmd)))
+#define VBOXVHWACMD_HEADSIZE() (RT_OFFSETOF(VBOXVHWACMD, body))
+#define VBOXVHWACMD_SIZE_FROMBODYSIZE(a_cbBody) (VBOXVHWACMD_HEADSIZE() + (a_cbBody))
+#define VBOXVHWACMD_SIZE(a_tTypeCmd) (VBOXVHWACMD_SIZE_FROMBODYSIZE(sizeof(a_tTypeCmd)))
typedef unsigned int VBOXVHWACMD_LENGTH;
typedef uint64_t VBOXVHWA_SURFHANDLE;
-#define VBOXVHWA_SURFHANDLE_INVALID 0ULL
-#define VBOXVHWACMD_BODY(_p, _t) ((_t*)(_p)->body)
-#define VBOXVHWACMD_HEAD(_pb) ((VBOXVHWACMD*)((u8 *)(_pb) - RT_OFFSETOF(VBOXVHWACMD, body)))
+#define VBOXVHWA_SURFHANDLE_INVALID UINT64_C(0)
+#define VBOXVHWACMD_BODY(a_pHdr, a_TypeBody) ( (a_TypeBody *)&(a_pHdr)->body[0] )
+#if !defined(IN_GUEST) && defined(IN_RING3)
+# define VBOXVHWACMD_BODY_HOST_HEAP(a_pHdr, a_TypeBody) ( (a_TypeBody *)&(a_pHdr)->body[0] )
+#endif
+#define VBOXVHWACMD_HEAD(a_pBody)\
+ ( (VBOXVHWACMD *)((u8 *)(a_pBody) - RT_OFFSETOF(VBOXVHWACMD, body)))
typedef struct VBOXVHWA_RECTL {
s32 left;
@@ -888,12 +894,13 @@ typedef struct VBVAHOSTCMD {
uint64_t Data; /* the body is 64-bit aligned */
} u;
char body[1];
-}VBVAHOSTCMD;
+} VBVAHOSTCMD;
-#define VBVAHOSTCMD_SIZE(_size) (sizeof(VBVAHOSTCMD) + (_size))
-#define VBVAHOSTCMD_BODY(_pCmd, _tBody) ((_tBody*)(_pCmd)->body)
-#define VBVAHOSTCMD_HDR(_pBody) ((VBVAHOSTCMD*)(((u8*)_pBody) - RT_OFFSETOF(VBVAHOSTCMD, body)))
-#define VBVAHOSTCMD_HDRSIZE (RT_OFFSETOF(VBVAHOSTCMD, body))
+#define VBVAHOSTCMD_SIZE(a_cb) (sizeof(VBVAHOSTCMD) + (a_cb))
+#define VBVAHOSTCMD_BODY(a_pCmd, a_TypeBody) ((a_TypeBody *)&(a_pCmd)->body[0])
+#define VBVAHOSTCMD_HDR(a_pBody) \
+ ( (VBVAHOSTCMD *)( (u8 *)(a_pBody) - RT_OFFSETOF(VBVAHOSTCMD, body)) )
+#define VBVAHOSTCMD_HDRSIZE (RT_OFFSETOF(VBVAHOSTCMD, body))
#pragma pack()
@@ -1256,16 +1263,17 @@ typedef enum {
#define VBOXSHGSMI_FLAG_GH_SYNCH 0x00000040
-static inline u8 * VBoxSHGSMIBufferData (const VBOXSHGSMIHEADER* pHeader)
+static inline u8 *
+VBoxSHGSMIBufferData(const VBOXSHGSMIHEADER *pHeader)
{
- return (u8 *)pHeader + sizeof (VBOXSHGSMIHEADER);
+ return (u8 *)pHeader + sizeof(VBOXSHGSMIHEADER);
}
-#define VBoxSHGSMIBufferHeaderSize() (sizeof (VBOXSHGSMIHEADER))
+#define VBoxSHGSMIBufferHeaderSize() (sizeof(VBOXSHGSMIHEADER))
-static inline PVBOXSHGSMIHEADER VBoxSHGSMIBufferHeader (const void *pvData)
+static inline VBOXSHGSMIHEADER * VBoxSHGSMIBufferHeader(const void *pvData)
{
- return (PVBOXSHGSMIHEADER)((u8 *)pvData - sizeof (VBOXSHGSMIHEADER));
+ return (VBOXSHGSMIHEADER *)((uintptr_t)pvData - sizeof(VBOXSHGSMIHEADER));
}
#ifdef VBOX_WITH_VDMA
@@ -1280,14 +1288,15 @@ typedef enum {
VBOXVDMA_CTL_TYPE_ENABLE,
VBOXVDMA_CTL_TYPE_DISABLE,
VBOXVDMA_CTL_TYPE_FLUSH,
- VBOXVDMA_CTL_TYPE_WATCHDOG
+ VBOXVDMA_CTL_TYPE_WATCHDOG,
+ VBOXVDMA_CTL_TYPE_END
} VBOXVDMA_CTL_TYPE;
typedef struct VBOXVDMA_CTL {
VBOXVDMA_CTL_TYPE enmCtl;
u32 offset;
s32 result;
-} VBOXVDMA_CTL, *PVBOXVDMA_CTL;
+} VBOXVDMA_CTL;
typedef struct VBOXVDMA_RECTL {
int16_t left;
@@ -1354,7 +1363,7 @@ typedef uint64_t VBOXVDMASURFHANDLE;
/* command buffer follows the VBOXVDMACBUF_DR in VRAM, VBOXVDMACBUF_DR::phBuf is ignored */
#define VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR 0x00000002
-/*
+/**
* We can not submit the DMA command via VRAM since we do not have control over
* DMA command buffer [de]allocation, i.e. we only control the buffer contents.
* In other words the system may call one of our callbacks to fill a command buffer
@@ -1377,21 +1386,25 @@ typedef struct VBOXVDMACBUF_DR {
uint64_t aGuestData[7];
} VBOXVDMACBUF_DR, *PVBOXVDMACBUF_DR;
-#define VBOXVDMACBUF_DR_TAIL(_pCmd, _t) ( (_t*)(((u8*)(_pCmd)) + sizeof (VBOXVDMACBUF_DR)) )
-#define VBOXVDMACBUF_DR_FROM_TAIL(_pCmd) ( (VBOXVDMACBUF_DR*)(((u8*)(_pCmd)) - sizeof (VBOXVDMACBUF_DR)) )
+#define VBOXVDMACBUF_DR_TAIL(a_pCmd, a_TailType) \
+ ( (a_TailType *)( ((u8*)(a_pCmd)) + sizeof(VBOXVDMACBUF_DR)) )
+#define VBOXVDMACBUF_DR_FROM_TAIL(a_pCmd) \
+ ( (VBOXVDMACBUF_DR *)( ((u8*)(a_pCmd)) - sizeof(VBOXVDMACBUF_DR)) )
typedef struct VBOXVDMACMD {
VBOXVDMACMD_TYPE enmType;
u32 u32CmdSpecific;
-} VBOXVDMACMD, *PVBOXVDMACMD;
-
-#define VBOXVDMACMD_HEADER_SIZE() sizeof (VBOXVDMACMD)
-#define VBOXVDMACMD_SIZE_FROMBODYSIZE(_s) (VBOXVDMACMD_HEADER_SIZE() + (_s))
-#define VBOXVDMACMD_SIZE(_t) (VBOXVDMACMD_SIZE_FROMBODYSIZE(sizeof (_t)))
-#define VBOXVDMACMD_BODY(_pCmd, _t) ( (_t*)(((u8*)(_pCmd)) + VBOXVDMACMD_HEADER_SIZE()) )
-#define VBOXVDMACMD_BODY_SIZE(_s) ( (_s) - VBOXVDMACMD_HEADER_SIZE() )
-#define VBOXVDMACMD_FROM_BODY(_pCmd) ( (VBOXVDMACMD*)(((u8*)(_pCmd)) - VBOXVDMACMD_HEADER_SIZE()) )
-#define VBOXVDMACMD_BODY_FIELD_OFFSET(_ot, _t, _f) ( (_ot)(uintptr_t)( VBOXVDMACMD_BODY(0, u8) + RT_OFFSETOF(_t, _f) ) )
+} VBOXVDMACMD;
+
+#define VBOXVDMACMD_HEADER_SIZE() sizeof(VBOXVDMACMD)
+#define VBOXVDMACMD_SIZE_FROMBODYSIZE(_s) (VBOXVDMACMD_HEADER_SIZE() + (_s))
+#define VBOXVDMACMD_SIZE(_t) (VBOXVDMACMD_SIZE_FROMBODYSIZE(sizeof (_t)))
+#define VBOXVDMACMD_BODY(a_pCmd, a_TypeBody) \
+ ( (a_TypeBody *)( ((u8 *)(a_pCmd)) + VBOXVDMACMD_HEADER_SIZE()) )
+#define VBOXVDMACMD_BODY_SIZE(_s) ( (_s) - VBOXVDMACMD_HEADER_SIZE() )
+#define VBOXVDMACMD_FROM_BODY(a_pBody) \
+ ( (VBOXVDMACMD *)( ((u8 *)(a_pBody)) - VBOXVDMACMD_HEADER_SIZE()) )
+#define VBOXVDMACMD_BODY_FIELD_OFFSET(_ot, _t, _f) ( (_ot)(uintptr_t)( VBOXVDMACMD_BODY(0, u8) + RT_OFFSETOF(_t, _f) ) )
typedef struct VBOXVDMACMD_DMA_PRESENT_BLT {
VBOXVIDEOOFFSET offSrc;
@@ -49,26 +49,19 @@ static bool vbva_write(struct vbva_buf_context * ctx,
const void *p, u32 len);
-static bool vbva_inform_host(struct vbva_buf_context * ctx,
- struct gen_pool * pHGSMICtx,
- s32 screen, bool enable)
+static bool vbva_inform_host(struct vbva_buf_context * ctx, struct gen_pool * pHGSMICtx, s32 screen, bool fEnable)
{
- bool ret = false;
+ bool fRc = false;
#if 0 /* All callers check this */
if (ppdev->bHGSMISupported)
#endif
{
- void *p = hgsmi_buffer_alloc(pHGSMICtx,
- sizeof (struct vbva_enable_ex),
- HGSMI_CH_VBVA,
- VBVA_ENABLE);
- if (!p) {
- // LogFunc(("HGSMIHeapAlloc failed\n"));
- } else {
- struct vbva_enable_ex *pEnable = p;
-
- pEnable->base.flags = enable? VBVA_F_ENABLE: VBVA_F_DISABLE;
+ struct vbva_enable_ex *pEnable =
+ (struct vbva_enable_ex *)hgsmi_buffer_alloc(pHGSMICtx, sizeof(struct vbva_enable_ex),
+ HGSMI_CH_VBVA, VBVA_ENABLE);
+ if (pEnable != NULL) {
+ pEnable->base.flags = fEnable ? VBVA_F_ENABLE : VBVA_F_DISABLE;
pEnable->base.offset = ctx->buffer_offset;
pEnable->base.result = VERR_NOT_SUPPORTED;
if (screen >= 0) {
@@ -76,19 +69,20 @@ static bool vbva_inform_host(struct vbva_buf_context * ctx,
pEnable->screen_id = screen;
}
- hgsmi_buffer_submit(pHGSMICtx, p);
+ hgsmi_buffer_submit(pHGSMICtx, pEnable);
- if (enable) {
- ret = RT_SUCCESS(pEnable->base.result);
- } else {
- ret = true;
- }
+ if (fEnable)
+ fRc = RT_SUCCESS(pEnable->base.result);
+ else
+ fRc = true;
- hgsmi_buffer_free(pHGSMICtx, p);
+ hgsmi_buffer_free(pHGSMICtx, pEnable);
+ } else {
+ // LogFunc(("HGSMIHeapAlloc failed\n"));
}
}
- return ret;
+ return fRc;
}
/*
@@ -98,7 +92,7 @@ bool vbva_enable(struct vbva_buf_context * ctx,
struct gen_pool * pHGSMICtx,
VBVABUFFER *vbva, s32 screen)
{
- bool ret = false;
+ bool fRc = false;
// LogFlowFunc(("vbva %p\n", vbva));
@@ -122,14 +116,14 @@ bool vbva_enable(struct vbva_buf_context * ctx,
ctx->record = NULL;
ctx->vbva = vbva;
- ret = vbva_inform_host(ctx, pHGSMICtx, screen, true);
+ fRc = vbva_inform_host(ctx, pHGSMICtx, screen, true);
}
- if (!ret) {
+ if (!fRc) {
vbva_disable(ctx, pHGSMICtx, screen);
}
- return ret;
+ return fRc;
}
void vbva_disable(struct vbva_buf_context * ctx,
@@ -143,14 +137,12 @@ void vbva_disable(struct vbva_buf_context * ctx,
ctx->vbva = NULL;
vbva_inform_host(ctx, pHGSMICtx, screen, false);
-
- return;
}
bool vbva_buffer_begin_update(struct vbva_buf_context * ctx,
struct gen_pool * pHGSMICtx)
{
- bool ret = false;
+ bool fRc = false;
// LogFunc(("flags = 0x%08X\n", ctx->vbva? ctx->vbva->host_events: -1));
@@ -185,11 +177,11 @@ bool vbva_buffer_begin_update(struct vbva_buf_context * ctx,
/* Remember which record we are using. */
ctx->record = record;
- ret = true;
+ fRc = true;
}
}
- return ret;
+ return fRc;
}
void vbva_buffer_end_update(struct vbva_buf_context * ctx)
@@ -208,8 +200,6 @@ void vbva_buffer_end_update(struct vbva_buf_context * ctx)
ctx->buffer_overflow = false;
ctx->record = NULL;
-
- return;
}
/*
@@ -225,23 +215,17 @@ static u32 vbva_buffer_available (const VBVABUFFER *vbva)
static void vbva_buffer_flush(struct gen_pool * ctx)
{
/* Issue the flush command. */
- void *p = hgsmi_buffer_alloc(ctx,
- sizeof (VBVAFLUSH),
- HGSMI_CH_VBVA,
- VBVA_FLUSH);
- if (!p) {
- // LogFunc(("HGSMIHeapAlloc failed\n"));
- } else {
- VBVAFLUSH *pFlush = (VBVAFLUSH *)p;
-
+ VBVAFLUSH *pFlush =
+ (VBVAFLUSH * )hgsmi_buffer_alloc(ctx, sizeof(VBVAFLUSH), HGSMI_CH_VBVA, VBVA_FLUSH);
+ if (pFlush != NULL) {
pFlush->reserved = 0;
- hgsmi_buffer_submit(ctx, p);
+ hgsmi_buffer_submit(ctx, pFlush);
- hgsmi_buffer_free(ctx, p);
+ hgsmi_buffer_free(ctx, pFlush);
+ } else {
+ // LogFunc(("HGSMIHeapAlloc failed\n"));
}
-
- return;
}
static void vbva_buffer_place_data_at(struct vbva_buf_context * ctx, const void *p,
@@ -260,8 +244,6 @@ static void vbva_buffer_place_data_at(struct vbva_buf_context * ctx, const void
memcpy (dst, p, bytes_till_boundary);
memcpy (&vbva->data[0], (u8 *)p + bytes_till_boundary, diff);
}
-
- return;
}
static bool vbva_write(struct vbva_buf_context * ctx,
@@ -3,9 +3,9 @@
#define VBOX_VERSION_MAJOR 5
#define VBOX_VERSION_MINOR 2
-#define VBOX_VERSION_BUILD 6
-#define VBOX_VERSION_STRING_RAW "5.2.6"
-#define VBOX_VERSION_STRING "5.2.6_KernelUbuntu"
+#define VBOX_VERSION_BUILD 18
+#define VBOX_VERSION_STRING_RAW "5.2.18"
+#define VBOX_VERSION_STRING "5.2.18_KernelUbuntu"
#define VBOX_API_VERSION_STRING "5_2"
#define VBOX_PRIVATE_BUILD_DESC "Private build by buildd"
https://bugs.launchpad.net/bugs/1796647 Signed-off-by: Seth Forshee <seth.forshee@canonical.com> --- ubuntu/vbox/BOM | 4 +- ubuntu/vbox/dkms.conf | 2 +- ubuntu/vbox/vboxguest/VBoxGuest-linux.c | 56 +--- ubuntu/vbox/vboxguest/common/time/time.c | 310 +++++++++++++++++- .../include/VBox/VBoxGuestCoreTypes.h | 3 + ubuntu/vbox/vboxguest/include/VBox/cdefs.h | 21 +- ubuntu/vbox/vboxguest/include/VBox/err.h | 3 + ubuntu/vbox/vboxguest/include/VBox/log.h | 3 + .../vbox/vboxguest/include/internal/magics.h | 10 +- ubuntu/vbox/vboxguest/include/iprt/asm.h | 62 +++- ubuntu/vbox/vboxguest/include/iprt/cdefs.h | 61 ++++ ubuntu/vbox/vboxguest/include/iprt/err.h | 2 + ubuntu/vbox/vboxguest/include/iprt/mangling.h | 28 ++ ubuntu/vbox/vboxguest/include/iprt/string.h | 25 ++ ubuntu/vbox/vboxguest/include/iprt/time.h | 28 +- ubuntu/vbox/vboxguest/include/iprt/x86.h | 42 ++- .../r0drv/linux/waitqueue-r0drv-linux.h | 2 +- ubuntu/vbox/vboxguest/revision-generated.h | 2 +- ubuntu/vbox/vboxguest/version-generated.h | 6 +- .../vboxsf/include/VBox/VBoxGuestCoreTypes.h | 3 + ubuntu/vbox/vboxsf/include/VBox/cdefs.h | 21 +- ubuntu/vbox/vboxsf/include/VBox/err.h | 3 + ubuntu/vbox/vboxsf/include/VBox/log.h | 3 + ubuntu/vbox/vboxsf/include/VBox/shflsvc.h | 2 + ubuntu/vbox/vboxsf/include/iprt/asm.h | 62 +++- ubuntu/vbox/vboxsf/include/iprt/cdefs.h | 61 ++++ ubuntu/vbox/vboxsf/include/iprt/err.h | 2 + ubuntu/vbox/vboxsf/include/iprt/mangling.h | 28 ++ ubuntu/vbox/vboxsf/include/iprt/string.h | 25 ++ ubuntu/vbox/vboxsf/include/iprt/time.h | 28 +- ubuntu/vbox/vboxsf/revision-generated.h | 2 +- ubuntu/vbox/vboxsf/utils.c | 17 +- ubuntu/vbox/vboxsf/version-generated.h | 6 +- ubuntu/vbox/vboxvideo/hgsmi_base.c | 11 +- ubuntu/vbox/vboxvideo/hgsmi_defs.h | 14 +- ubuntu/vbox/vboxvideo/modesetting.c | 62 ++-- ubuntu/vbox/vboxvideo/revision-generated.h | 2 +- ubuntu/vbox/vboxvideo/vbox_drv.c | 2 +- ubuntu/vbox/vboxvideo/vbox_drv.h | 29 +- ubuntu/vbox/vboxvideo/vbox_err.h | 2 + ubuntu/vbox/vboxvideo/vbox_fb.c 
| 4 +- ubuntu/vbox/vboxvideo/vbox_main.c | 6 +- ubuntu/vbox/vboxvideo/vbox_mode.c | 52 +-- ubuntu/vbox/vboxvideo/vbox_ttm.c | 69 +++- ubuntu/vbox/vboxvideo/vboxvideo.h | 117 ++++--- ubuntu/vbox/vboxvideo/vbva_base.c | 78 ++--- ubuntu/vbox/vboxvideo/version-generated.h | 6 +- 47 files changed, 1063 insertions(+), 324 deletions(-)