Add units to HAL time and duration names

The change adds "Ns" suffix to timeOnDevice, timeInDriver,
loopTimeoutDuration, deadline and duration.

Fix: 183118329
Test: mm
Change-Id: Id1f9ee4b8e41873c97690bb19a5e84572dd9ccf1
Author: Lev Proleev
Date: 2021-04-14 20:54:27 +01:00
parent 48ac0d6e79
commit 300b3245ae
13 changed files with 95 additions and 94 deletions


@@ -34,6 +34,6 @@
package android.hardware.neuralnetworks;
@VintfStability
interface IBurst {
android.hardware.neuralnetworks.ExecutionResult executeSynchronously(in android.hardware.neuralnetworks.Request request, in long[] memoryIdentifierTokens, in boolean measureTiming, in long deadline, in long loopTimeoutDuration);
android.hardware.neuralnetworks.ExecutionResult executeSynchronously(in android.hardware.neuralnetworks.Request request, in long[] memoryIdentifierTokens, in boolean measureTiming, in long deadlineNs, in long loopTimeoutDurationNs);
void releaseMemoryResource(in long memoryIdentifierToken);
}


@@ -41,8 +41,8 @@ interface IDevice {
boolean[] getSupportedOperations(in android.hardware.neuralnetworks.Model model);
android.hardware.neuralnetworks.DeviceType getType();
String getVersionString();
void prepareModel(in android.hardware.neuralnetworks.Model model, in android.hardware.neuralnetworks.ExecutionPreference preference, in android.hardware.neuralnetworks.Priority priority, in long deadline, in ParcelFileDescriptor[] modelCache, in ParcelFileDescriptor[] dataCache, in byte[] token, in android.hardware.neuralnetworks.IPreparedModelCallback callback);
void prepareModelFromCache(in long deadline, in ParcelFileDescriptor[] modelCache, in ParcelFileDescriptor[] dataCache, in byte[] token, in android.hardware.neuralnetworks.IPreparedModelCallback callback);
void prepareModel(in android.hardware.neuralnetworks.Model model, in android.hardware.neuralnetworks.ExecutionPreference preference, in android.hardware.neuralnetworks.Priority priority, in long deadlineNs, in ParcelFileDescriptor[] modelCache, in ParcelFileDescriptor[] dataCache, in byte[] token, in android.hardware.neuralnetworks.IPreparedModelCallback callback);
void prepareModelFromCache(in long deadlineNs, in ParcelFileDescriptor[] modelCache, in ParcelFileDescriptor[] dataCache, in byte[] token, in android.hardware.neuralnetworks.IPreparedModelCallback callback);
const int BYTE_SIZE_OF_CACHE_TOKEN = 32;
const int MAX_NUMBER_OF_CACHE_FILES = 32;
const int EXTENSION_TYPE_HIGH_BITS_PREFIX = 15;


@@ -34,8 +34,8 @@
package android.hardware.neuralnetworks;
@VintfStability
interface IPreparedModel {
android.hardware.neuralnetworks.ExecutionResult executeSynchronously(in android.hardware.neuralnetworks.Request request, in boolean measureTiming, in long deadline, in long loopTimeoutDuration);
android.hardware.neuralnetworks.FencedExecutionResult executeFenced(in android.hardware.neuralnetworks.Request request, in ParcelFileDescriptor[] waitFor, in boolean measureTiming, in long deadline, in long loopTimeoutDuration, in long duration);
android.hardware.neuralnetworks.ExecutionResult executeSynchronously(in android.hardware.neuralnetworks.Request request, in boolean measureTiming, in long deadlineNs, in long loopTimeoutDurationNs);
android.hardware.neuralnetworks.FencedExecutionResult executeFenced(in android.hardware.neuralnetworks.Request request, in ParcelFileDescriptor[] waitFor, in boolean measureTiming, in long deadlineNs, in long loopTimeoutDurationNs, in long durationNs);
android.hardware.neuralnetworks.IBurst configureExecutionBurst();
const long DEFAULT_LOOP_TIMEOUT_DURATION_NS = 2000000000;
const long MAXIMUM_LOOP_TIMEOUT_DURATION_NS = 15000000000;


@@ -34,6 +34,6 @@
package android.hardware.neuralnetworks;
@VintfStability
parcelable Timing {
long timeOnDevice;
long timeInDriver;
long timeOnDeviceNs;
long timeInDriverNs;
}


@@ -77,18 +77,18 @@ interface IBurst {
* @param measure Specifies whether or not to measure duration of the execution. The duration
* runs from the time the driver sees the call to the executeSynchronously
* function to the time the driver returns from the function.
* @param deadline The time by which the execution is expected to complete. The time is measured
* in nanoseconds since epoch of the steady clock (as from
* std::chrono::steady_clock). If the execution cannot be finished by the
* deadline, the execution may be aborted. Passing -1 means the deadline is
* omitted. Other negative values are invalid.
* @param loopTimeoutDuration The maximum amount of time in nanoseconds that should be spent
* executing a {@link OperationType::WHILE} operation. If a loop
* condition model does not output false within this duration, the
* execution must be aborted. If -1 is provided, the maximum amount
* of time is {@link DEFAULT_LOOP_TIMEOUT_DURATION_NS}. Other
* negative values are invalid. When provided, the duration must not
* exceed {@link MAXIMUM_LOOP_TIMEOUT_DURATION_NS}.
* @param deadlineNs The time by which the execution is expected to complete. The time is
* measured in nanoseconds since epoch of the steady clock (as from
* std::chrono::steady_clock). If the execution cannot be finished by the
* deadline, the execution may be aborted. Passing -1 means the deadline is
* omitted. Other negative values are invalid.
* @param loopTimeoutDurationNs The maximum amount of time in nanoseconds that should be spent
* executing a {@link OperationType::WHILE} operation. If a loop
* condition model does not output false within this duration, the
* execution must be aborted. If -1 is provided, the maximum amount
* of time is {@link DEFAULT_LOOP_TIMEOUT_DURATION_NS}. Other
* negative values are invalid. When provided, the duration must
* not exceed {@link MAXIMUM_LOOP_TIMEOUT_DURATION_NS}.
* @return ExecutionResult parcelable, containing the status of the execution, output shapes and
* timing information.
* @throws ServiceSpecificException with one of the following ErrorStatus values:
@@ -100,7 +100,7 @@ interface IBurst {
* - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
*/
ExecutionResult executeSynchronously(in Request request, in long[] memoryIdentifierTokens,
in boolean measureTiming, in long deadline, in long loopTimeoutDuration);
in boolean measureTiming, in long deadlineNs, in long loopTimeoutDurationNs);
/**
* releaseMemoryResource is used by the client to signal to the service that a memory buffer

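The deadlineNs convention above (an absolute timestamp in nanoseconds on the steady clock, with -1 meaning "no deadline") maps directly onto std::chrono. A minimal client-side sketch of building that value, assuming only the standard library; the helper name is illustrative and not part of the HAL:

#include <chrono>
#include <cstdint>
#include <optional>

// Turn a relative timeout into the absolute deadlineNs expected by
// IBurst::executeSynchronously: nanoseconds since the steady_clock epoch,
// or -1 when the caller wants to omit the deadline.
int64_t makeSteadyDeadlineNs(std::optional<std::chrono::nanoseconds> timeout) {
    if (!timeout.has_value()) return -1;  // omitted deadline
    const auto deadline = std::chrono::steady_clock::now() + *timeout;
    return std::chrono::duration_cast<std::chrono::nanoseconds>(
                   deadline.time_since_epoch())
            .count();
}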

@@ -306,11 +306,11 @@ interface IDevice {
* @param preference Indicates the intended execution behavior of a prepared model.
* @param priority The priority of the prepared model relative to other prepared models owned by
* the client.
* @param deadline The time by which the model is expected to be prepared. The time is measured
* in nanoseconds since boot (as from clock_gettime(CLOCK_BOOTTIME, &ts)
* or ::android::base::boot_clock). If the model cannot be prepared by the
* deadline, the preparation may be aborted. Passing -1 means the deadline is
* omitted. Other negative values are invalid.
* @param deadlineNs The time by which the model is expected to be prepared. The time is
* measured in nanoseconds since boot (as from clock_gettime(CLOCK_BOOTTIME,
* &ts) or ::android::base::boot_clock). If the model cannot be prepared by
* the deadline, the preparation may be aborted. Passing -1 means the deadline
* is omitted. Other negative values are invalid.
* @param modelCache A vector of file descriptors for the security-sensitive cache. The length
* of the vector must either be 0 indicating that caching information is not
* provided, or match the numModelCache returned from
@@ -344,7 +344,7 @@ interface IDevice {
* - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
*/
void prepareModel(in Model model, in ExecutionPreference preference, in Priority priority,
in long deadline, in ParcelFileDescriptor[] modelCache,
in long deadlineNs, in ParcelFileDescriptor[] modelCache,
in ParcelFileDescriptor[] dataCache, in byte[] token,
in IPreparedModelCallback callback);
@@ -395,11 +395,11 @@ interface IDevice {
* with a set of inputs to the model. Note that the same prepared model object may be used with
* different shapes of inputs on different (possibly concurrent) executions.
*
* @param deadline The time by which the model is expected to be prepared. The time is measured
* in nanoseconds since boot (as from clock_gettime(CLOCK_BOOTTIME, &ts) or
* ::android::base::boot_clock). If the model cannot be prepared by the
* deadline, the preparation may be aborted. Passing -1 means the deadline is
* omitted. Other negative values are invalid.
* @param deadlineNs The time by which the model is expected to be prepared. The time is
* measured in nanoseconds since boot (as from clock_gettime(CLOCK_BOOTTIME,
* &ts) or ::android::base::boot_clock). If the model cannot be prepared by
* the deadline, the preparation may be aborted. Passing -1 means the deadline
* is omitted. Other negative values are invalid.
* @param modelCache A vector of file descriptors for the security-sensitive cache. The length
* of the vector must match the numModelCache returned from
* getNumberOfCacheFilesNeeded. The cache file descriptors will be provided in
@@ -426,7 +426,7 @@ interface IDevice {
* the deadline
* - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
*/
void prepareModelFromCache(in long deadline, in ParcelFileDescriptor[] modelCache,
void prepareModelFromCache(in long deadlineNs, in ParcelFileDescriptor[] modelCache,
in ParcelFileDescriptor[] dataCache, in byte[] token,
in IPreparedModelCallback callback);
}
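prepareModel and prepareModelFromCache measure deadlineNs on the boot clock rather than on the steady clock. A sketch of how a caller might compute it, assuming POSIX clock_gettime with CLOCK_BOOTTIME is available; the helper name is illustrative:

#include <cstdint>
#include <time.h>

// Absolute deadlineNs for prepareModel / prepareModelFromCache: nanoseconds
// since boot, read from CLOCK_BOOTTIME as described in the documentation above.
int64_t makeBootDeadlineNs(int64_t relativeTimeoutNs) {
    timespec ts{};
    clock_gettime(CLOCK_BOOTTIME, &ts);
    const int64_t nowNs = static_cast<int64_t>(ts.tv_sec) * 1'000'000'000 + ts.tv_nsec;
    return nowNs + relativeTimeoutNs;
}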


@@ -72,18 +72,18 @@ interface IPreparedModel {
* @param measure Specifies whether or not to measure duration of the execution. The duration
* runs from the time the driver sees the call to the executeSynchronously
* function to the time the driver returns from the function.
* @param deadline The time by which the execution is expected to complete. The time is measured
* in nanoseconds since boot (as from clock_gettime(CLOCK_BOOTTIME, &ts) or
* ::android::base::boot_clock). If the execution cannot be finished by the
* deadline, the execution may be aborted. Passing -1 means the deadline is
* omitted. Other negative values are invalid.
* @param loopTimeoutDuration The maximum amount of time in nanoseconds that should be spent
* executing a {@link OperationType::WHILE} operation. If a loop
* condition model does not output false within this duration, the
* execution must be aborted. If -1 is provided, the maximum amount
* of time is {@link DEFAULT_LOOP_TIMEOUT_DURATION_NS}. Other
* negative values are invalid. When provided, the duration must not
* exceed {@link MAXIMUM_LOOP_TIMEOUT_DURATION_NS}.
* @param deadlineNs The time by which the execution is expected to complete. The time is
* measured in nanoseconds since boot (as from clock_gettime(CLOCK_BOOTTIME,
* &ts) or ::android::base::boot_clock). If the execution cannot be finished
* by the deadline, the execution may be aborted. Passing -1 means the
* deadline is omitted. Other negative values are invalid.
* @param loopTimeoutDurationNs The maximum amount of time in nanoseconds that should be spent
* executing a {@link OperationType::WHILE} operation. If a loop
* condition model does not output false within this duration, the
* execution must be aborted. If -1 is provided, the maximum amount
* of time is {@link DEFAULT_LOOP_TIMEOUT_DURATION_NS}. Other
* negative values are invalid. When provided, the duration must
* not exceed {@link MAXIMUM_LOOP_TIMEOUT_DURATION_NS}.
* @return ExecutionResult parcelable, containing the status of the execution, output shapes and
* timing information.
* @throws ServiceSpecificException with one of the following ErrorStatus values:
@@ -95,7 +95,7 @@ interface IPreparedModel {
* - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
*/
ExecutionResult executeSynchronously(in Request request, in boolean measureTiming,
in long deadline, in long loopTimeoutDuration);
in long deadlineNs, in long loopTimeoutDurationNs);
/**
* Launch a fenced asynchronous execution on a prepared model.
@@ -137,22 +137,23 @@ interface IPreparedModel {
* @param waitFor A vector of sync fence file descriptors. Execution must not start until all
* sync fences have been signaled.
* @param measure Specifies whether or not to measure duration of the execution.
* @param deadline The time by which the execution is expected to complete. The time is measured
* in nanoseconds since boot (as from clock_gettime(CLOCK_BOOTTIME, &ts) or
* ::android::base::boot_clock). If the execution cannot be finished by the
* deadline, the execution may be aborted. Passing -1 means the deadline is
* omitted. Other negative values are invalid.
* @param loopTimeoutDuration The maximum amount of time in nanoseconds that should be spent
* executing a {@link OperationType::WHILE} operation. If a loop
* condition model does not output false within this duration, the
* execution must be aborted. If -1 is provided, the maximum amount
* of time is {@link DEFAULT_LOOP_TIMEOUT_DURATION_NS}. Other
* negative values are invalid. When provided, the duration must not
* exceed {@link MAXIMUM_LOOP_TIMEOUT_DURATION_NS}.
* @param duration The length of time in nanoseconds within which the execution is expected to
* complete after all sync fences in waitFor are signaled. If the execution
* cannot be finished within the duration, the execution may be aborted. Passing
* -1 means the duration is omitted. Other negative values are invalid.
* @param deadlineNs The time by which the execution is expected to complete. The time is
* measured in nanoseconds since boot (as from clock_gettime(CLOCK_BOOTTIME,
* &ts) or ::android::base::boot_clock). If the execution cannot be finished
* by the deadline, the execution may be aborted. Passing -1 means the
* deadline is omitted. Other negative values are invalid.
* @param loopTimeoutDurationNs The maximum amount of time in nanoseconds that should be spent
* executing a {@link OperationType::WHILE} operation. If a loop
* condition model does not output false within this duration, the
* execution must be aborted. If -1 is provided, the maximum amount
* of time is {@link DEFAULT_LOOP_TIMEOUT_DURATION_NS}. Other
* negative values are invalid. When provided, the duration must
* not exceed {@link MAXIMUM_LOOP_TIMEOUT_DURATION_NS}.
* @param durationNs The length of time in nanoseconds within which the execution is expected to
* complete after all sync fences in waitFor are signaled. If the execution
* cannot be finished within the duration, the execution may be aborted.
* Passing -1 means the duration is omitted. Other negative values are
* invalid.
* @return The FencedExecutionResult parcelable, containing IFencedExecutionCallback and the
* sync fence.
* @throws ServiceSpecificException with one of the following ErrorStatus values:
@@ -165,8 +166,8 @@ interface IPreparedModel {
* - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
*/
FencedExecutionResult executeFenced(in Request request, in ParcelFileDescriptor[] waitFor,
in boolean measureTiming, in long deadline, in long loopTimeoutDuration,
in long duration);
in boolean measureTiming, in long deadlineNs, in long loopTimeoutDurationNs,
in long durationNs);
/**
* Configure a Burst object used to execute multiple inferences on a prepared model in rapid

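All three timing arguments of executeFenced share the sentinel convention spelled out above: -1 means the value is omitted, other negative values are invalid, and loopTimeoutDurationNs must additionally not exceed MAXIMUM_LOOP_TIMEOUT_DURATION_NS. A small validation sketch of those rules; the function is illustrative, not the driver's actual code:

#include <cstdint>

// Check executeFenced timing arguments against the documented rules.
// -1 means "omitted"; any other negative value is rejected.
bool validTimingArgs(int64_t deadlineNs, int64_t loopTimeoutDurationNs, int64_t durationNs) {
    constexpr int64_t kMaxLoopTimeoutNs = 15'000'000'000;  // MAXIMUM_LOOP_TIMEOUT_DURATION_NS
    const bool deadlineOk = deadlineNs == -1 || deadlineNs >= 0;
    const bool durationOk = durationNs == -1 || durationNs >= 0;
    const bool loopOk = loopTimeoutDurationNs == -1 ||
                        (loopTimeoutDurationNs >= 0 && loopTimeoutDurationNs <= kMaxLoopTimeoutNs);
    return deadlineOk && durationOk && loopOk;
}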

@@ -28,9 +28,9 @@ parcelable Timing {
/**
* Execution time on device (not driver, which runs on host processor).
*/
long timeOnDevice;
long timeOnDeviceNs;
/**
* Execution time in driver (including time on device).
*/
long timeInDriver;
long timeInDriverNs;
}
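Since timeInDriverNs includes the time spent on the device, a Timing with both fields measured should satisfy timeOnDeviceNs <= timeInDriverNs; the VTS check further below relies on exactly this. A tiny consistency check, with illustrative names:

#include <cstdint>

// -1 means the value was not measured; when both fields are present,
// device time cannot exceed the total time spent in the driver.
bool timingIsConsistent(int64_t timeOnDeviceNs, int64_t timeInDriverNs) {
    if (timeOnDeviceNs == -1 || timeInDriverNs == -1) return true;
    return timeOnDeviceNs <= timeInDriverNs;
}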


@@ -410,11 +410,11 @@ GeneralResult<SharedMemory> unvalidatedConvert(const aidl_hal::Memory& memory) {
}
GeneralResult<Timing> unvalidatedConvert(const aidl_hal::Timing& timing) {
if (timing.timeInDriver < -1) {
return NN_ERROR() << "Timing: timeInDriver must not be less than -1";
if (timing.timeInDriverNs < -1) {
return NN_ERROR() << "Timing: timeInDriverNs must not be less than -1";
}
if (timing.timeOnDevice < -1) {
return NN_ERROR() << "Timing: timeOnDevice must not be less than -1";
if (timing.timeOnDeviceNs < -1) {
return NN_ERROR() << "Timing: timeOnDeviceNs must not be less than -1";
}
constexpr auto convertTiming = [](int64_t halTiming) -> OptionalDuration {
if (halTiming == kNoTiming) {
@@ -422,8 +422,8 @@ GeneralResult<Timing> unvalidatedConvert(const aidl_hal::Timing& timing) {
}
return nn::Duration(static_cast<uint64_t>(halTiming));
};
return Timing{.timeOnDevice = convertTiming(timing.timeOnDevice),
.timeInDriver = convertTiming(timing.timeInDriver)};
return Timing{.timeOnDevice = convertTiming(timing.timeOnDeviceNs),
.timeInDriver = convertTiming(timing.timeInDriverNs)};
}
GeneralResult<Model::OperandValues> unvalidatedConvert(const std::vector<uint8_t>& operandValues) {
@@ -964,8 +964,8 @@ nn::GeneralResult<RequestMemoryPool> unvalidatedConvert(const nn::Request::Memor
nn::GeneralResult<Timing> unvalidatedConvert(const nn::Timing& timing) {
return Timing{
.timeOnDevice = NN_TRY(unvalidatedConvert(timing.timeOnDevice)),
.timeInDriver = NN_TRY(unvalidatedConvert(timing.timeInDriver)),
.timeOnDeviceNs = NN_TRY(unvalidatedConvert(timing.timeOnDevice)),
.timeInDriverNs = NN_TRY(unvalidatedConvert(timing.timeInDriver)),
};
}
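The convertTiming lambda above maps the HAL sentinel -1 (kNoTiming) to an empty OptionalDuration. The reverse direction is sketched here with plain std::optional and std::chrono types instead of the project's NN_TRY plumbing; names are illustrative:

#include <chrono>
#include <cstdint>
#include <optional>

// Inverse of convertTiming: an empty optional becomes the HAL sentinel -1,
// otherwise the duration is reported as a count of nanoseconds.
int64_t toHalTimingNs(const std::optional<std::chrono::nanoseconds>& duration) {
    constexpr int64_t kNoTiming = -1;
    return duration.has_value() ? static_cast<int64_t>(duration->count()) : kNoTiming;
}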


@@ -39,7 +39,7 @@ using ::testing::InvokeWithoutArgs;
using ::testing::SetArgPointee;
const std::shared_ptr<IPreparedModel> kInvalidPreparedModel;
constexpr auto kNoTiming = Timing{.timeOnDevice = -1, .timeInDriver = -1};
constexpr auto kNoTiming = Timing{.timeOnDeviceNs = -1, .timeInDriverNs = -1};
constexpr auto makeStatusOk = [] { return ndk::ScopedAStatus::ok(); };


@@ -547,7 +547,7 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
makeOutputInsufficientSize(kInsufficientOutputIndex, &request);
}
int64_t loopTimeoutDuration = kOmittedTimeoutDuration;
int64_t loopTimeoutDurationNs = kOmittedTimeoutDuration;
// OutputType::MISSED_DEADLINE is only used by
// TestKind::INTINITE_LOOP_TIMEOUT tests to verify that an infinite loop is
// aborted after a timeout.
@@ -555,7 +555,7 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
// Override the default loop timeout duration with a small value to
// speed up test execution.
constexpr int64_t kMillisecond = 1'000'000;
loopTimeoutDuration = 1 * kMillisecond;
loopTimeoutDurationNs = 1 * kMillisecond;
}
ErrorStatus executionStatus;
@@ -568,7 +568,7 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
ExecutionResult executionResult;
// execute
const auto ret = preparedModel->executeSynchronously(request, testConfig.measureTiming,
kNoDeadline, loopTimeoutDuration,
kNoDeadline, loopTimeoutDurationNs,
&executionResult);
ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
<< ret.getDescription();
@@ -608,7 +608,7 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
ExecutionResult executionResult;
// execute
ret = burst->executeSynchronously(request, slots, testConfig.measureTiming, kNoDeadline,
loopTimeoutDuration, &executionResult);
loopTimeoutDurationNs, &executionResult);
ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
<< ret.getDescription();
if (ret.isOk()) {
@@ -635,7 +635,7 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
ErrorStatus result = ErrorStatus::NONE;
FencedExecutionResult executionResult;
auto ret = preparedModel->executeFenced(request, {}, testConfig.measureTiming,
kNoDeadline, loopTimeoutDuration, kNoDuration,
kNoDeadline, loopTimeoutDurationNs, kNoDuration,
&executionResult);
ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
<< ret.getDescription();
@@ -649,7 +649,7 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
waitFor.emplace_back(dupFd);
// If a sync fence is returned, try start another run waiting for the sync fence.
ret = preparedModel->executeFenced(request, waitFor, testConfig.measureTiming,
kNoDeadline, loopTimeoutDuration, kNoDuration,
kNoDeadline, loopTimeoutDurationNs, kNoDuration,
&executionResult);
ASSERT_TRUE(ret.isOk());
waitForSyncFence(executionResult.syncFence.get());
@@ -686,8 +686,8 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
if (!testConfig.measureTiming) {
EXPECT_EQ(timing, kNoTiming);
} else {
if (timing.timeOnDevice != -1 && timing.timeInDriver != -1) {
EXPECT_LE(timing.timeOnDevice, timing.timeInDriver);
if (timing.timeOnDeviceNs != -1 && timing.timeInDriverNs != -1) {
EXPECT_LE(timing.timeOnDeviceNs, timing.timeInDriverNs);
}
}


@@ -53,7 +53,7 @@ using MaybeResults = std::optional<Results>;
using ExecutionFunction =
std::function<MaybeResults(const std::shared_ptr<IPreparedModel>& preparedModel,
const Request& request, int64_t deadline)>;
const Request& request, int64_t deadlineNs)>;
static int64_t makeDeadline(DeadlineBoundType deadlineBoundType) {
const auto getNanosecondsSinceEpoch = [](const auto& time) -> int64_t {
@@ -79,9 +79,9 @@ static int64_t makeDeadline(DeadlineBoundType deadlineBoundType) {
void runPrepareModelTest(const std::shared_ptr<IDevice>& device, const Model& model,
Priority priority, std::optional<DeadlineBoundType> deadlineBound) {
int64_t deadline = kNoDeadline;
int64_t deadlineNs = kNoDeadline;
if (deadlineBound.has_value()) {
deadline = makeDeadline(deadlineBound.value());
deadlineNs = makeDeadline(deadlineBound.value());
}
// see if service can handle model
@@ -96,8 +96,8 @@ void runPrepareModelTest(const std::shared_ptr<IDevice>& device, const Model& mo
const std::shared_ptr<PreparedModelCallback> preparedModelCallback =
ndk::SharedRefBase::make<PreparedModelCallback>();
const auto prepareLaunchStatus =
device->prepareModel(model, ExecutionPreference::FAST_SINGLE_ANSWER, priority, deadline,
{}, {}, kEmptyCacheToken, preparedModelCallback);
device->prepareModel(model, ExecutionPreference::FAST_SINGLE_ANSWER, priority,
deadlineNs, {}, {}, kEmptyCacheToken, preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk())
<< "prepareLaunchStatus: " << prepareLaunchStatus.getDescription();
@@ -156,13 +156,13 @@ void runPrepareModelTests(const std::shared_ptr<IDevice>& device, const Model& m
}
static MaybeResults executeSynchronously(const std::shared_ptr<IPreparedModel>& preparedModel,
const Request& request, int64_t deadline) {
const Request& request, int64_t deadlineNs) {
SCOPED_TRACE("synchronous");
const bool measure = false;
// run execution
ExecutionResult executionResult;
const auto ret = preparedModel->executeSynchronously(request, measure, deadline,
const auto ret = preparedModel->executeSynchronously(request, measure, deadlineNs,
kOmittedTimeoutDuration, &executionResult);
EXPECT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
<< ret.getDescription();
@@ -182,7 +182,7 @@ static MaybeResults executeSynchronously(const std::shared_ptr<IPreparedModel>&
}
static MaybeResults executeBurst(const std::shared_ptr<IPreparedModel>& preparedModel,
const Request& request, int64_t deadline) {
const Request& request, int64_t deadlineNs) {
SCOPED_TRACE("burst");
const bool measure = false;
@@ -200,7 +200,7 @@ static MaybeResults executeBurst(const std::shared_ptr<IPreparedModel>& prepared
// run execution
ExecutionResult executionResult;
ret = burst->executeSynchronously(request, slots, measure, deadline, kOmittedTimeoutDuration,
ret = burst->executeSynchronously(request, slots, measure, deadlineNs, kOmittedTimeoutDuration,
&executionResult);
EXPECT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
<< ret.getDescription();
@@ -224,10 +224,10 @@ void runExecutionTest(const std::shared_ptr<IPreparedModel>& preparedModel,
const ExecutionContext& context, bool synchronous,
DeadlineBoundType deadlineBound) {
const ExecutionFunction execute = synchronous ? executeSynchronously : executeBurst;
const auto deadline = makeDeadline(deadlineBound);
const auto deadlineNs = makeDeadline(deadlineBound);
// Perform execution and unpack results.
const auto results = execute(preparedModel, request, deadline);
const auto results = execute(preparedModel, request, deadlineNs);
if (!results.has_value()) return;
const auto& [status, outputShapes, timing] = results.value();


@@ -43,7 +43,7 @@ namespace nn = ::android::nn;
inline constexpr Priority kDefaultPriority = Priority::MEDIUM;
inline constexpr Timing kNoTiming = {.timeOnDevice = -1, .timeInDriver = -1};
inline constexpr Timing kNoTiming = {.timeOnDeviceNs = -1, .timeInDriverNs = -1};
inline constexpr int64_t kNoDeadline = -1;
inline constexpr int64_t kOmittedTimeoutDuration = -1;
inline constexpr int64_t kNoDuration = -1;