You need to #include <time.h> for struct timespec to be defined.
Why are NS_PER_SECOND and MAX_NS enum members? There's no advantage compared to plain old const long. I'm not sure why we need two constants anyway; the code that uses MAX_NS is simplified if we use NS_PER_SECOND instead:
long total_ns = t1.tv_nsec + t2.tv_nsec;
time_t total_s = t1.tv_sec + t2.tv_sec;

while (total_ns >= NS_PER_SECOND) {
    total_ns -= NS_PER_SECOND;
    ++total_s;
}
(Note the use of the correct types in this version: time_t for the seconds and long for the nanoseconds, matching the members of struct timespec.)
I think this logic is wrong:
if (t.tv_sec > 0 && t.tv_nsec < 0) {
    t.tv_nsec += NS_PER_SECOND;
    t.tv_sec--;
} else if (t.tv_sec < 0 && t.tv_nsec > 0) {
    t.tv_nsec -= NS_PER_SECOND;
    t.tv_sec++;
}
I'm reasonably sure that tv_nsec should always be positive, even when tv_sec is negative. So we can simplify that to just:
if (t.tv_nsec < 0) {
    t.tv_nsec += NS_PER_SECOND;
    t.tv_sec--;
}
Note that the logic in timespec_add() already assumes positive tv_nsec.
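For example, with always-positive tv_nsec, a value of minus half a second is stored with the nanoseconds measuring forwards from the (negative) whole-second boundary. A minimal sketch of that convention (my illustration, not part of the reviewed code):

/* -0.5 seconds: tv_sec + tv_nsec/1e9 = -1 + 0.5 = -0.5 */
struct timespec half_second_ago = { -1, 500000000L };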
This is highly susceptible to overflow:
unsigned int total_ns = t.tv_nsec + (t.tv_sec * (MAX_NS+1));
The whole reason we have struct timespec is that we might need to represent values outside the range of the integer types. Probably better to use divmod to divide tv_sec by n, and add the remainder (converted to nanoseconds) to tv_nsec before dividing; we need to be very careful here to avoid overflow. Again, the unsigned type is inconsistent with the rest of the code.
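To see how quickly that overflows: if unsigned int is 32 bits, tv_sec * (MAX_NS+1) wraps as soon as tv_sec reaches 5, since 2^32 / 10^9 is only about 4.3. A minimal sketch demonstrating the wrap (my illustration, not from the reviewed code):

#include <stdio.h>

int main(void)
{
    /* 5 * 10^9 = 5000000000 does not fit in 32 bits;
       unsigned arithmetic wraps modulo 2^32, giving 705032704. */
    unsigned int total_ns = 5u * 1000000000u;
    printf("%u\n", total_ns);
    return 0;
}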
Modified code
Here's my version of these functions:
#include <time.h>

const long NS_PER_SECOND = 1000000000L;

struct timespec timespec_sub(const struct timespec t1, const struct timespec t2)
{
    /* Computes t2 - t1. */
    struct timespec t;
    t.tv_nsec = t2.tv_nsec - t1.tv_nsec;
    t.tv_sec = t2.tv_sec - t1.tv_sec;
    if (t.tv_nsec < 0) {
        /* borrow one second */
        t.tv_nsec += NS_PER_SECOND;
        t.tv_sec--;
    }
    return t;
}

struct timespec timespec_add(const struct timespec t1, const struct timespec t2)
{
    struct timespec t = { t1.tv_sec + t2.tv_sec, t1.tv_nsec + t2.tv_nsec };
    if (t.tv_nsec >= NS_PER_SECOND) {
        /* carry one second */
        t.tv_nsec -= NS_PER_SECOND;
        t.tv_sec++;
    }
    return t;
}
struct timespec timespec_divide(struct timespec t, const int n)
{
    /* Divide seconds and nanoseconds separately, then fold the seconds
       remainder into the nanoseconds.  The remainder term is split in
       two to avoid overflowing long. */
    time_t remainder_secs = t.tv_sec % n;
    t.tv_sec /= n;
    t.tv_nsec /= n;
    t.tv_nsec +=
        remainder_secs * (NS_PER_SECOND / n) +
        remainder_secs * (NS_PER_SECOND % n) / n;
    while (t.tv_nsec >= NS_PER_SECOND) {
        t.tv_nsec -= NS_PER_SECOND;
        t.tv_sec++;
    }
    return t;
}
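As a quick sanity check of that remainder arithmetic: dividing { 9, 526174085 } by 5 gives remainder_secs = 9 % 5 = 4 and t.tv_sec = 1; t.tv_nsec becomes 526174085 / 5 = 105234817, plus 4 * (1000000000 / 5) = 800000000 and 4 * (1000000000 % 5) / 5 = 0, for a final tv_nsec of 905234817. That is exactly the value of a in the test below.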
And a primitive unit test:
int main(void)
{
    const struct timespec a = { 1, 905234817 };

    struct timespec a_2 = timespec_add(a, a);
    struct timespec a_4 = timespec_add(a_2, a_2);
    struct timespec a_5 = timespec_add(a_4, a);

    /* a_5 / 5 should get us back to a, so z should be zero */
    struct timespec z = timespec_sub(a, timespec_divide(a_5, 5));

    return z.tv_sec || z.tv_nsec;
}
You should expand on the testing, to prove correctness for the tricky cases where overflow could happen.
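As a starting point, a sketch of such cases (my own additions, using assert rather than a test framework, and assuming the functions above):

#include <assert.h>

int main(void)
{
    /* Carry: 0.6 s + 0.6 s must roll the excess nanoseconds into a second. */
    const struct timespec x = { 0, 600000000L };
    struct timespec sum = timespec_add(x, x);
    assert(sum.tv_sec == 1 && sum.tv_nsec == 200000000L);

    /* Borrow: timespec_sub(t1, t2) computes t2 - t1, so this is 1.0 s - 0.4 s. */
    const struct timespec one = { 1, 0 };
    const struct timespec y = { 0, 400000000L };
    struct timespec diff = timespec_sub(y, one);
    assert(diff.tv_sec == 0 && diff.tv_nsec == 600000000L);

    return 0;
}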
Further simplification
We can separate out the code to normalise out-of-range nanoseconds into its own function:
struct timespec timespec_normalise(struct timespec t)
{
    t.tv_sec += t.tv_nsec / NS_PER_SECOND;
    if ((t.tv_nsec %= NS_PER_SECOND) < 0) {
        /* division rounds towards zero, since C99 */
        t.tv_nsec += NS_PER_SECOND;
        --t.tv_sec;
    }
    return t;
}
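For instance, timespec_normalise turns { 0, -1 } into { -1, 999999999 } and { 0, 1500000000 } into { 1, 500000000 }, so both the borrow and the carry cases are handled in one place.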
Then the functions can use that, to make them shorter and simpler:
struct timespec timespec_sub(const struct timespec t1, const struct timespec t2)
{
    struct timespec t = { t2.tv_sec - t1.tv_sec, t2.tv_nsec - t1.tv_nsec };
    return timespec_normalise(t);
}
struct timespec timespec_add(const struct timespec t1, const struct timespec t2)
{
    struct timespec t = { t1.tv_sec + t2.tv_sec, t1.tv_nsec + t2.tv_nsec };
    return timespec_normalise(t);
}
struct timespec timespec_divide(struct timespec t, const int n)
{
    time_t remainder_secs = t.tv_sec % n;
    t.tv_sec /= n;
    t.tv_nsec /= n;
    t.tv_nsec +=
        remainder_secs * (NS_PER_SECOND / n) +
        remainder_secs * (NS_PER_SECOND % n) / n;
    return timespec_normalise(t);
}
Because we have the unit tests, we have high confidence that we haven't affected the functionality here.