
The merchant server calls the GetFaceIdTokenIntl API to apply for an SDKToken, which is used throughout the selfie verification process and is passed to the GetFaceIdResultIntl API to obtain the liveness comparison result. The merchant server also needs to provide the corresponding endpoints for the merchant client to call. The following sample code, written in Golang, shows how to call the TencentCloud API on the server and obtain a correct response. After obtaining the SDKToken through GetFaceIdTokenIntl, you can return any other data required by the client application along with the SDKToken.

```go
var FaceIdClient *faceid.Client

func init() {
	// Instantiate a client configuration object. You can specify the timeout period and other configuration items.
	prof := profile.NewClientProfile()
	prof.HttpProfile.ReqTimeout = 60
	// TODO: replace the "SecretId" and "SecretKey" strings with your API SecretId and SecretKey.
	credential := cloud.NewCredential("SecretId", "SecretKey")
	var err error
	// Instantiate the client object of the requested faceid service.
	FaceIdClient, err = faceid.NewClient(credential, "ap-singapore", prof)
	if nil != err {
		log.Fatal("FaceIdClient init error: ", err)
	}
}

// GetFaceIdToken gets the token.
func GetFaceIdToken(w http.ResponseWriter, r *http.Request) {
	log.Println("get face id token")
	// Step 1: parse parameters.
	_ = r.ParseForm()
	var SecureLevel = r.FormValue("SecureLevel")
	// Step 2: instantiate the request object and provide the necessary parameters.
	request := faceid.NewGetFaceIdTokenIntlRequest()
	request.SecureLevel = &SecureLevel
	// Step 3: call the Tencent Cloud API through FaceIdClient.
	response, err := FaceIdClient.GetFaceIdTokenIntl(request)
	// Step 4: process the Tencent Cloud API response and construct the return object.
	if nil != err {
		_, _ = w.Write([]byte("error"))
		return
	}
	SdkToken := response.Response.SdkToken
	apiResp := struct {
		SdkToken *string
	}{SdkToken: SdkToken}
	b, _ := json.Marshal(apiResp)
	// ... more code is omitted.
	// Step 5: return the service response.
	_, _ = w.Write(b)
}

// GetFaceIdResult gets the result.
func GetFaceIdResult(w http.ResponseWriter, r *http.Request) {
	// Step 1: parse parameters.
	_ = r.ParseForm()
	SdkToken := r.FormValue("SdkToken")
	// Step 2: instantiate the request object and provide the necessary parameters.
	request := faceid.NewGetFaceIdResultIntlRequest()
	request.SdkToken = &SdkToken
	// Step 3: call the Tencent Cloud API through FaceIdClient.
	response, err := FaceIdClient.GetFaceIdResultIntl(request)
	// Step 4: process the Tencent Cloud API response and construct the return object.
	if nil != err {
		_, _ = w.Write([]byte("error"))
		return
	}
	result := response.Response.Result
	apiResp := struct {
		Result *string
	}{Result: result}
	b, _ := json.Marshal(apiResp)
	// ... more code is omitted.
	// Step 5: return the service response.
	_, _ = w.Write(b)
}

func main() {
	// Expose the endpoints.
	http.HandleFunc("/api/v1/get-token", GetFaceIdToken)
	http.HandleFunc("/api/v1/get-result", GetFaceIdResult)
	// Listen on the port.
	err := http.ListenAndServe(":8080", nil)
	if nil != err {
		log.Fatal("ListenAndServe error: ", err)
	}
}
```

Access the API (http://ip:port/api/v1/get-token) to check whether the SdkToken is returned, and access the API (http://ip:port/api/v1/get-result) to check whether the value of the Result field is 0. Through these results, you can determine whether the server integration is successful. For details on responses, see API introduction.
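If you prefer to script this check rather than hit the endpoints by hand, the following Java sketch exercises both of them. It is only an illustration, not part of the official sample: the 127.0.0.1:8080 address, the SecureLevel value, the placeholder SdkToken, and the use of plain HTTP are all assumptions.

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class EndpointSmokeTest {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();

        // Ask the merchant server for an SdkToken (the SecureLevel value is a placeholder).
        HttpRequest tokenReq = HttpRequest.newBuilder()
                .uri(URI.create("http://127.0.0.1:8080/api/v1/get-token?SecureLevel=4"))
                .GET()
                .build();
        HttpResponse<String> tokenResp = client.send(tokenReq, HttpResponse.BodyHandlers.ofString());
        System.out.println("get-token response: " + tokenResp.body());

        // Pull the result for a token returned by the call above (replace the placeholder value).
        String sdkToken = "REPLACE_WITH_RETURNED_SDKTOKEN";
        HttpRequest resultReq = HttpRequest.newBuilder()
                .uri(URI.create("http://127.0.0.1:8080/api/v1/get-result?SdkToken=" + sdkToken))
                .GET()
                .build();
        HttpResponse<String> resultResp = client.send(resultReq, HttpResponse.BodyHandlers.ofString());
        System.out.println("get-result response: " + resultResp.body());
    }
}
```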
The directory structure of the Android demo project, with the SDK .aar packages placed in the module's libs directory, is as follows:

```text
├── codedemo
│   ├── build.gradle
│   ├── libs
│   │   ├── huiyansdk_android_overseas_1.0.9.14_release.aar
│   │   ├── tencent-ai-sdk-aicamera-1.0.25-release.aar
│   │   ├── tencent-ai-sdk-common-1.1.43-release.aar
│   │   ├── tencent-ai-sdk-network-1.0.2.3.6-release.aar
│   │   └── tencent-ai-sdk-youtu-base-1.0.1.44-release.aar
│   ├── proguard-rules.pro
│   └── src
│       └── main
```
```groovy
// Set up filtering based on the NDK .so architecture (taking armeabi-v7a as an example;
// if the device also supports arm64-v8a, that option can be added as well).
defaultConfig {
    ndk {
        abiFilters 'armeabi-v7a'
    }
}

dependencies {
    // Introduce the SDK.
    implementation files("libs/huiyansdk_android_overseas_1.0.9.5_release.aar")
    // Common algorithm SDK.
    implementation files("libs/tencent-ai-sdk-youtu-base-1.0.1.32-release.aar")
    // Common capability component library.
    implementation files("libs/tencent-ai-sdk-common-1.1.27-release.aar")
    implementation files("libs/tencent-ai-sdk-aicamera-1.0.18-release.aar")
    implementation files("libs/tencent-ai-sdk-network-1.0.2.3.6-release.aar")
    // Third-party libraries that the SDK relies on.
    // gson
    implementation 'com.google.code.gson:gson:2.8.9'
}
```
Declare the following permissions in the "AndroidManifest.xml" file.

```xml
<!-- Camera permission -->
<uses-permission android:name="android.permission.CAMERA" />
<uses-feature
    android:name="android.hardware.camera"
    android:required="true" />
<!-- Network permission required by the SDK -->
<uses-permission android:name="android.permission.INTERNET" />
<!-- Dependency required for device risk control -->
<uses-permission android:name="android.permission.ACCESS_NETWORK_STATE" />
<!-- Permissions required for the SDK (optional) -->
<uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE" />
```
In addition to declaring the permissions in the "AndroidManifest.xml" file, it is also necessary to request the required permissions dynamically in code (a sketch follows the initialization snippet below).

Initialize the SDK in the Application class of your app, as this step is primarily used to perform various initialization tasks related to the SDK.

```java
@Override
public void onCreate() {
    super.onCreate();
    // The SDK needs to be initialized during the application startup process.
    HuiYanOsApi.init(this);
}
```
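The following is a minimal sketch of a runtime permission request for the camera, the only dangerous permission the flow strictly needs (WRITE_EXTERNAL_STORAGE is optional, as noted in the manifest above). The helper class name and request code are hypothetical; adapt them to your own Activity.

```java
import android.Manifest;
import android.content.pm.PackageManager;
import androidx.core.app.ActivityCompat;
import androidx.core.content.ContextCompat;

// Hypothetical helper: call this from your Activity before starting the verification flow.
public final class PermissionHelper {

    public static final int REQUEST_CODE_CAMERA = 1001; // arbitrary request code

    // Returns true if the camera permission is already granted, otherwise prompts the user.
    public static boolean ensureCameraPermission(android.app.Activity activity) {
        if (ContextCompat.checkSelfPermission(activity, Manifest.permission.CAMERA)
                == PackageManager.PERMISSION_GRANTED) {
            return true;
        }
        ActivityCompat.requestPermissions(
                activity,
                new String[]{Manifest.permission.CAMERA},
                REQUEST_CODE_CAMERA);
        return false;
    }
}
```

Handle the grant result in onRequestPermissionsResult() and start the verification flow only after the camera permission has been granted.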

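The currentToken passed to the SDK in the next snippet is the SdkToken issued by the merchant server in the earlier Golang sample. How the app fetches it is up to you; the sketch below is only an illustration that assumes the /api/v1/get-token endpoint is reachable at a placeholder host over plain HTTP and returns the JSON shape produced by that sample.

```java
import android.util.Log;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import org.json.JSONObject;

// Hypothetical helper: fetch the SdkToken from the merchant server on a background thread.
public final class TokenFetcher {

    public interface Callback {
        void onToken(String sdkToken);
        void onError(Exception e);
    }

    // The host/port and SecureLevel value are placeholders; replace them with your own.
    private static final String GET_TOKEN_URL = "http://your-server:8080/api/v1/get-token?SecureLevel=4";

    public static void fetchSdkToken(Callback callback) {
        new Thread(() -> {
            try {
                HttpURLConnection conn = (HttpURLConnection) new URL(GET_TOKEN_URL).openConnection();
                conn.setRequestMethod("GET");
                try (BufferedReader reader = new BufferedReader(
                        new InputStreamReader(conn.getInputStream(), "UTF-8"))) {
                    StringBuilder body = new StringBuilder();
                    String line;
                    while ((line = reader.readLine()) != null) {
                        body.append(line);
                    }
                    // The Golang sample serializes {"SdkToken": "..."}.
                    String sdkToken = new JSONObject(body.toString()).getString("SdkToken");
                    callback.onToken(sdkToken);
                } finally {
                    conn.disconnect();
                }
            } catch (Exception e) {
                Log.e("TokenFetcher", "fetch token failed", e);
                callback.onError(e);
            }
        }).start();
    }
}
```

In production you would typically call this endpoint over HTTPS and pass the returned token into the flow below as currentToken.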
```java
// Relevant parameters of HuiYanOs.
HuiYanOsConfig huiYanOsConfig = new HuiYanOsConfig();
// This license file is located in the assets folder.
huiYanOsConfig.setAuthLicense("YTFaceSDK.license");
// The license file for device risk control (required when using the device risk control mode) should also be stored in the assets folder.
huiYanOsConfig.setRiskLicense("turing.lic");
// Enable the device's risk control capabilities.
huiYanOsConfig.setOpenCheckRiskMode(true);
if (compatCheckBox.isChecked()) {
    huiYanOsConfig.setPageColorStyle(PageColorStyle.Dark);
}
// This method initiates the identity verification process. The initial liveness verification step is carried out using the data sent by the backend through the currentToken.
HuiYanOsApi.startHuiYanAuth(currentToken, huiYanOsConfig, new HuiYanOsAuthCallBack() {
    @Override
    public void onSuccess(HuiYanOsAuthResult authResult) {
        // Display the result.
        runOnUiThread(new Runnable() {
            @Override
            public void run() {
                Toast.makeText(SimplifyActivity.this, "Liveness detection passed!", Toast.LENGTH_SHORT).show();
            }
        });
    }

    @Override
    public void onFail(int errorCode, String errorMsg, String token) {
        String msg = "Liveness detection failed" + " code: " + errorCode + " msg: " + errorMsg + " token: " + token;
        Log.e(TAG, "onFail " + msg);
        // Display the result.
        runOnUiThread(new Runnable() {
            @Override
            public void run() {
                Toast.makeText(SimplifyActivity.this, msg, Toast.LENGTH_SHORT).show();
            }
        });
    }
});
```
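After onSuccess fires, the comparison result still lives on the server side: the client (or your own backend logic) retrieves it through the /api/v1/get-result endpoint exposed in the Golang sample, passing the same SdkToken. The sketch below is only an illustration; the method name and host/port are hypothetical, and the call must run off the main thread.

```java
// Hypothetical helper inside the Activity: ask the merchant server for the verification result.
// Uses the same HttpURLConnection pattern as the token fetch above; the host/port are placeholders.
private void queryVerificationResult(final String sdkToken) {
    new Thread(() -> {
        try {
            java.net.HttpURLConnection conn = (java.net.HttpURLConnection)
                    new java.net.URL("http://your-server:8080/api/v1/get-result?SdkToken=" + sdkToken)
                            .openConnection();
            try (java.io.BufferedReader reader = new java.io.BufferedReader(
                    new java.io.InputStreamReader(conn.getInputStream(), "UTF-8"))) {
                StringBuilder body = new StringBuilder();
                String line;
                while ((line = reader.readLine()) != null) {
                    body.append(line);
                }
                // The Golang sample serializes {"Result": ...}; the server check above treats 0 as success.
                Log.i(TAG, "get-result response: " + body);
            } finally {
                conn.disconnect();
            }
        } catch (Exception e) {
            Log.e(TAG, "query result failed", e);
        }
    }).start();
}
```

A natural place to trigger this is inside onSuccess, with the same SdkToken that was passed to startHuiYanAuth.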
```text
├── codedemo
│   ├── build.gradle
│   ├── libs
│   ├── proguard-rules.pro
│   └── src
│       └── main
│           └── assets
│               ├── turing.lic
│               └── YTFaceSDK.license
```
```java
@Override
protected void onDestroy() {
    super.onDestroy();
    // Release resources when exiting the app.
    HuiYanOsApi.release();
}
```
```text
# The SDK obfuscation (ProGuard) configuration is as follows:
-keep class com.tencent.could.huiyansdk.** {*;}
-keep class com.tencent.could.aicamare.** {*;}
-keep class com.tencent.could.component.** {*;}
-keep class com.tencent.youtu.** {*;}
-keep class com.tenpay.utils.SMUtils {*;}
-keep class com.tencent.turingface.** {*;}
-keep class com.turingface.sdk.** {*;}
-keep class com.tencent.cloud.ai.network.** {*;}
```
Add the following framework to the target under Link Binary With Libraries.

```text
└── HuiYanOverseasSDK.xcframework
```
```text
├── YTFaceSDK.license
├── turing.license
└── face-tracker-v003.bundle
```
```text
└── HuiYanSDKUI.bundle
```
```text
├── Your Project.xcodeproj
├── Podfile
└── CloudHuiYanSDK_FW
    ├── CloudHuiYanSDK_FW.podspec
    ├── Frameworks
    │   └── HuiYanOverseasSDK.xcframework
    └── Resources
        ├── HuiYanSDKUI.bundle
        └── face-tracker-v003.bundle
```
```ruby
target 'HuiYanAuthDemo' do
  use_frameworks!
  pod 'CloudHuiYanSDK_FW', :path => './CloudHuiYanSDK_FW'
end
```
In Build Settings -> Framework Search Paths, add $(inherited).
In Build Settings -> Other Linker Flags, add $(inherited).

Add the camera usage description to the Info.plist file:

```xml
<key>Privacy - Camera Usage Description</key>
<string>Permission to enable your camera is required for face recognition.</string>
```

```objectivec
#import <HuiYanOverseasSDK/HuiYanSDK.h>

// Obtain the token.
NSString *faceToken = self.tokenTextField.text;
// Configure the SDK.
HuiYanOsConfig *config = [[HuiYanOsConfig alloc] init];
// Configure the license.
config.authLicense = [[NSBundle mainBundle] pathForResource:@"YTFaceSDK.license" ofType:@""];
// Configure the timeout for the preparation phase.
config.prepareTimeoutMs = 20000;
// Configure the timeout for the motion detection phase.
config.actionTimeoutMs = 20000;
// Delete local liveness detection videos.
config.isDeleteVideoCache = YES;
// Set UI-related callbacks.
config.delegate = self;
// Configure custom multi-language types.
config.languageType = EN;
// config.userLanguageFileName = @"ko";
// config.userLanguageBundleName = @"UseLanguageBundle";
config.iShowTipsPage = YES;
[[HuiYanOSKit sharedInstance] startHuiYaneKYC:faceToken withConfig:config witSuccCallback:^(HuiYanOsAuthResult * _Nonnull authResult, id _Nullable reserved) {
    NSString *token = authResult.faceToken;
} withFailCallback:^(int errCode, NSString * _Nonnull errMsg, id _Nullable reserved) {
    NSString *showMsg = [NSString stringWithFormat:@"err:%d:%@", errCode, errMsg];
    NSLog(@"err:%@", showMsg);
}];
```
```objectivec
// Configure the SDK.
HuiYanOsConfig *config = [[HuiYanOsConfig alloc] init];
// Configure the license.
config.authLicense = [[NSBundle mainBundle] pathForResource:@"YTFaceSDK.license" ofType:@""];
config.openCheckRiskMode = YES;
// If risk detection is enabled, risk authorization information must be provided.
config.riskLicense = [[NSBundle mainBundle] pathForResource:@"turing.license" ofType:@""];
```