upload initial version.

main
詹力 2023-11-29 13:21:56 +08:00
commit 81da026de3
36 changed files with 762 additions and 0 deletions

Code/triad_openvr/.gitignore vendored Normal file

@@ -0,0 +1,2 @@
__pycache__/
*.pyc


@@ -0,0 +1,49 @@
# Triad OpenVR Python Wrapper
This is an enhanced wrapper for the already excellent [pyopenvr library](https://github.com/cmbruns/pyopenvr) by [cmbruns](https://github.com/cmbruns). The goal of this library is to provide easy-to-use Python functions for any SteamVR-tracked system.
# Getting Started
```python
import triad_openvr as vr
import pylab as plt
v = vr.triad_openvr()
data = v.devices["controller_1"].sample(1000,250)
plt.plot(data.time,data.x)
plt.title('Controller X Coordinate')
plt.xlabel('Time (seconds)')
plt.ylabel('X Coordinate (meters)')
```
![Example plot of captured data](images/simple_xcoord_plot.png "Example Plot")
# Configuration file
Devices are identified by serial number, so the same physical device always keeps the same name. To make this work, just change the serial numbers and names in the `config.json` file. Here is an example config file:
```
{
  "devices":[
    {
      "name": "hmd",
      "type": "HMD",
      "serial":"XXX-XXXXXXXX"
    },
    {
      "name": "tracking_reference_1",
      "type": "Tracking Reference",
      "serial":"LHB-XXXXXXXX"
    },
    {
      "name": "controller_1",
      "type": "Controller",
      "serial":"XXX-XXXXXXXX"
    },
    {
      "name": "tracker_1",
      "type": "Tracker",
      "serial":"LHR-XXXXXXXX"
    }
  ]
}
```
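If a configuration file is used, pass its path to the constructor (the path below is just an example); the wrapper then names each connected device after the entry whose serial number matches:
```python
import triad_openvr as vr

# "config.json" is an example path; any file with the structure shown above works
v = vr.triad_openvr(configfile_path="config.json")
v.print_discovered_objects()
print(v.devices["tracker_1"].get_pose_euler())
```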


@@ -0,0 +1,29 @@
{
  "devices":[
    {
      "name": "tracking_reference_0",
      "type": "Tracking Reference",
      "serial":"LHB-02F97E98"
    },
    {
      "name": "tracking_reference_1",
      "type": "Tracking Reference",
      "serial":"LHB-431A55FD"
    },
    {
      "name": "tracker_0",
      "type": "Tracker",
      "serial":"LHR-3CD1A9DA"
    },
    {
      "name": "tracker_1",
      "type": "Tracker",
      "serial":"LHR-25865D81"
    },
    {
      "name": "tracker_2",
      "type": "Tracker",
      "serial":"LHR-4359D2B6"
    }
  ]
}


@@ -0,0 +1,26 @@
import triad_openvr
import time
import sys

v = triad_openvr.triad_openvr()
v.print_discovered_objects()

if len(sys.argv) == 1:
    interval = 1/250
elif len(sys.argv) == 2:
    interval = 1/float(sys.argv[1])
else:
    print("Invalid number of arguments")
    interval = False

if interval:
    while(True):
        start = time.time()
        txt = ""
        for each in v.devices["controller_1"].get_pose_euler():
            txt += "%.4f" % each
            txt += " "
        print("\r" + txt, end="")
        sleep_time = interval-(time.time()-start)
        if sleep_time>0:
            time.sleep(sleep_time)


@@ -0,0 +1,118 @@
{
   "steamvr" : {
      "requireHmd" : false,
      "forcedDriver" : "null",
      "forcedHmd" : "",
      "displayDebug" : false,
      "debugProcessPipe" : "",
      "enableDistortion" : true,
      "displayDebugX" : 0,
      "displayDebugY" : 0,
      "sendSystemButtonToAllApps" : false,
      "loglevel" : 3,
      "ipd" : 0.063,
      "ipdOffset" : 0.0,
      "background" : "",
      "backgroundUseDomeProjection" : false,
      "backgroundCameraHeight" : 1.6,
      "backgroundDomeRadius" : 0.0,
      "environment" : "",
      "gridColor" : "",
      "playAreaColor" : "",
      "showStage" : false,
      "activateMultipleDrivers" : true,
      "directMode" : true,
      "usingSpeakers" : false,
      "speakersForwardYawOffsetDegrees" : 0.0,
      "basestationPowerManagement" : false,
      "neverKillProcesses" : false,
      "renderTargetMultiplier" : 1.0,
      "allowAsyncReprojection" : true,
      "allowInterleavedReprojection" : true,
      "forceReprojection" : false,
      "forceFadeOnBadTracking" : true,
      "defaultMirrorView" : 0,
      "showMirrorView" : false,
      "startMonitorFromAppLaunch" : true,
      "startCompositorFromAppLaunch" : true,
      "startDashboardFromAppLaunch" : true,
      "startOverlayAppsFromDashboard" : true,
      "enableHomeApp" : false,
      "setInitialDefaultHomeApp" : false,
      "CycleBackgroundImageTimeSec" : -1,
      "retailDemo" : false,
      "panelMask" : true,
      "panelMaskVignette" : true,
      "panelMaskVignetteWidth" : 2.0
   },
   "userinterface" : {
      "StatusAlwaysOnTop" : true,
      "screenshots" : true,
      "screenshotType" : 2
   },
   "notifications" : {
      "DoNotDisturb" : false
   },
   "keyboard" : {
      "TutorialCompletions" : 0,
      "ScaleX" : 1.1,
      "ScaleY" : 1.1,
      "OffsetLeftX" : 0.0,
      "OffsetRightX" : 0.0,
      "OffsetY" : 0.0,
      "Smoothing" : false
   },
   "perfcheck" : {
      "heuristicActive" : true,
      "warnInHMD" : false,
      "warnOnlyOnce" : false,
      "allowTimingStore" : false,
      "saveTimingsOnExit" : false,
      "perfTestData" : 0.0
   },
   "collisionBounds" : {
      "CollisionBoundsStyle" : 0,
      "CollisionBoundsGroundPerimeterOn" : false,
      "CollisionBoundsCenterMarkerOn" : false,
      "CollisionBoundsPlaySpaceOn" : false,
      "CollisionBoundsFadeDistance" : 0.7,
      "CollisionBoundsColorGammaR" : 0,
      "CollisionBoundsColorGammaG" : 255,
      "CollisionBoundsColorGammaB" : 255,
      "CollisionBoundsColorGammaA" : 153
   },
   "camera" : {
      "enableCamera" : false,
      "enableCameraInDashboard" : false,
      "enableCameraForCollisionBounds" : false,
      "enableCameraForRoomView" : false,
      "cameraBoundsColorGammaR" : 0,
      "cameraBoundsColorGammaG" : 255,
      "cameraBoundsColorGammaB" : 255,
      "cameraBoundsColorGammaA" : 153,
      "cameraBoundsStrength" : 78
   },
   "audio" : {
      "onPlaybackDevice" : "",
      "onRecordDevice" : "",
      "onPlaybackMirrorDevice" : "",
      "offPlaybackDevice" : "",
      "offRecordDevice" : "",
      "viveHDMIGain" : true
   },
   "power" : {
      "powerOffOnExit" : true,
      "turnOffScreensTimeout" : 5.0,
      "turnOffControllersTimeout" : 300.0,
      "returnToWatchdogTimeout" : 0.0,
      "autoLaunchSteamVROnButtonPress" : true
   },
   "modelskins" : {
   },
   "dashboard" : {
      "enableDashboard" : true,
      "arcadeMode" : false
   },
   "version" : "1",
   "jsonid" : "vrsettings"
}

[Binary image file added (63 KiB); content not shown]

@@ -0,0 +1,26 @@
import triad_openvr
import time
import sys

v = triad_openvr.triad_openvr()
v.print_discovered_objects()

if len(sys.argv) == 1:
    interval = 1/250
elif len(sys.argv) == 2:
    interval = 1/float(sys.argv[1])
else:
    print("Invalid number of arguments")
    interval = False

if interval:
    while(True):
        start = time.time()
        txt = ""
        for each in v.devices["tracker_1"].get_pose_euler():
            txt += "%.4f" % each
            txt += " "
        print("\r" + txt, end="")
        sleep_time = interval-(time.time()-start)
        if sleep_time>0:
            time.sleep(sleep_time)


@@ -0,0 +1,299 @@
import time
import sys
import openvr
import math
import json

from functools import lru_cache

# Function to print out text but instead of starting a new line it will overwrite the existing line
def update_text(txt):
    sys.stdout.write('\r'+txt)
    sys.stdout.flush()

#Convert the standard 3x4 position/rotation matrix to a x,y,z location and the appropriate Euler angles (in degrees)
def convert_to_euler(pose_mat):
    yaw = 180 / math.pi * math.atan2(pose_mat[1][0], pose_mat[0][0])
    pitch = 180 / math.pi * math.atan2(pose_mat[2][0], pose_mat[0][0])
    roll = 180 / math.pi * math.atan2(pose_mat[2][1], pose_mat[2][2])
    x = pose_mat[0][3]
    y = pose_mat[1][3]
    z = pose_mat[2][3]
    return [x,y,z,yaw,pitch,roll]

#Convert the standard 3x4 position/rotation matrix to a x,y,z location and the appropriate Quaternion
def convert_to_quaternion(pose_mat):
    # Per issue #2, adding a abs() so that sqrt only results in real numbers
    r_w = math.sqrt(abs(1+pose_mat[0][0]+pose_mat[1][1]+pose_mat[2][2]))/2
    r_x = (pose_mat[2][1]-pose_mat[1][2])/(4*r_w)
    r_y = (pose_mat[0][2]-pose_mat[2][0])/(4*r_w)
    r_z = (pose_mat[1][0]-pose_mat[0][1])/(4*r_w)
    x = pose_mat[0][3]
    y = pose_mat[1][3]
    z = pose_mat[2][3]
    return [x,y,z,r_w,r_x,r_y,r_z]

#Define a class to make it easy to append pose matricies and convert to both Euler and Quaternion for plotting
class pose_sample_buffer():
    def __init__(self):
        self.i = 0
        self.index = []
        self.time = []
        self.x = []
        self.y = []
        self.z = []
        self.yaw = []
        self.pitch = []
        self.roll = []
        self.r_w = []
        self.r_x = []
        self.r_y = []
        self.r_z = []

    def append(self,pose_mat,t):
        self.time.append(t)
        self.x.append(pose_mat[0][3])
        self.y.append(pose_mat[1][3])
        self.z.append(pose_mat[2][3])
        self.yaw.append(180 / math.pi * math.atan(pose_mat[1][0] /pose_mat[0][0]))
        self.pitch.append(180 / math.pi * math.atan(-1 * pose_mat[2][0] / math.sqrt(pow(pose_mat[2][1], 2) + math.pow(pose_mat[2][2], 2))))
        self.roll.append(180 / math.pi * math.atan(pose_mat[2][1] /pose_mat[2][2]))
        r_w = math.sqrt(abs(1+pose_mat[0][0]+pose_mat[1][1]+pose_mat[2][2]))/2
        self.r_w.append(r_w)
        self.r_x.append((pose_mat[2][1]-pose_mat[1][2])/(4*r_w))
        self.r_y.append((pose_mat[0][2]-pose_mat[2][0])/(4*r_w))
        self.r_z.append((pose_mat[1][0]-pose_mat[0][1])/(4*r_w))

def get_pose(vr_obj):
    return vr_obj.getDeviceToAbsoluteTrackingPose(openvr.TrackingUniverseStanding, 0, openvr.k_unMaxTrackedDeviceCount)

class vr_tracked_device():
    def __init__(self,vr_obj,index,device_class):
        self.device_class = device_class
        self.index = index
        self.vr = vr_obj

    @lru_cache(maxsize=None)
    def get_serial(self):
        return self.vr.getStringTrackedDeviceProperty(self.index, openvr.Prop_SerialNumber_String)

    def get_model(self):
        return self.vr.getStringTrackedDeviceProperty(self.index, openvr.Prop_ModelNumber_String)

    def get_battery_percent(self):
        return self.vr.getFloatTrackedDeviceProperty(self.index, openvr.Prop_DeviceBatteryPercentage_Float)

    def is_charging(self):
        return self.vr.getBoolTrackedDeviceProperty(self.index, openvr.Prop_DeviceIsCharging_Bool)

    def sample(self,num_samples,sample_rate):
        interval = 1/sample_rate
        rtn = pose_sample_buffer()
        sample_start = time.time()
        for i in range(num_samples):
            start = time.time()
            pose = get_pose(self.vr)
            rtn.append(pose[self.index].mDeviceToAbsoluteTracking,time.time()-sample_start)
            sleep_time = interval- (time.time()-start)
            if sleep_time>0:
                time.sleep(sleep_time)
        return rtn

    def get_pose_euler(self, pose=None):
        if pose == None:
            pose = get_pose(self.vr)
        if pose[self.index].bPoseIsValid:
            return convert_to_euler(pose[self.index].mDeviceToAbsoluteTracking)
        else:
            return None

    def get_pose_matrix(self, pose=None):
        if pose == None:
            pose = get_pose(self.vr)
        if pose[self.index].bPoseIsValid:
            return pose[self.index].mDeviceToAbsoluteTracking
        else:
            return None

    def get_velocity(self, pose=None):
        if pose == None:
            pose = get_pose(self.vr)
        if pose[self.index].bPoseIsValid:
            return pose[self.index].vVelocity
        else:
            return None

    def get_angular_velocity(self, pose=None):
        if pose == None:
            pose = get_pose(self.vr)
        if pose[self.index].bPoseIsValid:
            return pose[self.index].vAngularVelocity
        else:
            return None

    def get_pose_quaternion(self, pose=None):
        if pose == None:
            pose = get_pose(self.vr)
        if pose[self.index].bPoseIsValid:
            return convert_to_quaternion(pose[self.index].mDeviceToAbsoluteTracking)
        else:
            return None

    def controller_state_to_dict(self, pControllerState):
        # This function is graciously borrowed from https://gist.github.com/awesomebytes/75daab3adb62b331f21ecf3a03b3ab46
        # docs: https://github.com/ValveSoftware/openvr/wiki/IVRSystem::GetControllerState
        d = {}
        d['unPacketNum'] = pControllerState.unPacketNum
        # on trigger .y is always 0.0 says the docs
        d['trigger'] = pControllerState.rAxis[1].x
        # 0.0 on trigger is fully released
        # -1.0 to 1.0 on joystick and trackpads
        d['trackpad_x'] = pControllerState.rAxis[0].x
        d['trackpad_y'] = pControllerState.rAxis[0].y
        # These are published and always 0.0
        # for i in range(2, 5):
        #     d['unknowns_' + str(i) + '_x'] = pControllerState.rAxis[i].x
        #     d['unknowns_' + str(i) + '_y'] = pControllerState.rAxis[i].y
        d['ulButtonPressed'] = pControllerState.ulButtonPressed
        d['ulButtonTouched'] = pControllerState.ulButtonTouched
        # To make easier to understand what is going on
        # Second bit marks menu button
        d['menu_button'] = bool(pControllerState.ulButtonPressed >> 1 & 1)
        # 32 bit marks trackpad
        d['trackpad_pressed'] = bool(pControllerState.ulButtonPressed >> 32 & 1)
        d['trackpad_touched'] = bool(pControllerState.ulButtonTouched >> 32 & 1)
        # third bit marks grip button
        d['grip_button'] = bool(pControllerState.ulButtonPressed >> 2 & 1)
        # System button can't be read, if you press it
        # the controllers stop reporting
        return d

    def get_controller_inputs(self):
        result, state = self.vr.getControllerState(self.index)
        return self.controller_state_to_dict(state)

    def trigger_haptic_pulse(self, duration_micros=1000, axis_id=0):
        """
        Causes devices with haptic feedback to vibrate for a short time.
        """
        self.vr.triggerHapticPulse(self.index, axis_id, duration_micros)

class vr_tracking_reference(vr_tracked_device):
    def get_mode(self):
        return self.vr.getStringTrackedDeviceProperty(self.index,openvr.Prop_ModeLabel_String).decode('utf-8').upper()

    def sample(self,num_samples,sample_rate):
        print("Warning: Tracking References do not move, sample isn't much use...")

class triad_openvr():
    def __init__(self, configfile_path=None):
        # Initialize OpenVR in the SteamVR runtime
        self.vr = openvr.init(openvr.VRApplication_Other)
        self.vrsystem = openvr.VRSystem()

        # Initializing object to hold indexes for various tracked objects
        self.object_names = {"Tracking Reference":[],"HMD":[],"Controller":[],"Tracker":[]}
        self.devices = {}
        self.device_index_map = {}
        poses = self.vr.getDeviceToAbsoluteTrackingPose(openvr.TrackingUniverseStanding, 0,
                                                        openvr.k_unMaxTrackedDeviceCount)

        # Loading config file
        if configfile_path:
            try:
                with open(configfile_path, 'r') as json_data:
                    config = json.load(json_data)
            except EnvironmentError: # parent of IOError, OSError *and* WindowsError where available
                print('config.json not found.')
                exit(1)

            # Iterate through the pose list to find the active devices and determine their type
            for i in range(openvr.k_unMaxTrackedDeviceCount):
                if poses[i].bDeviceIsConnected:
                    device_serial = self.vr.getStringTrackedDeviceProperty(i,openvr.Prop_SerialNumber_String).decode('utf-8')
                    for device in config['devices']:
                        if device_serial == device['serial']:
                            device_name = device['name']
                            self.object_names[device['type']].append(device_name)
                            self.devices[device_name] = vr_tracked_device(self.vr,i,device['type'])
        else:
            # Iterate through the pose list to find the active devices and determine their type
            for i in range(openvr.k_unMaxTrackedDeviceCount):
                if poses[i].bDeviceIsConnected:
                    self.add_tracked_device(i)

    def __del__(self):
        openvr.shutdown()

    def get_pose(self):
        return get_pose(self.vr)

    def poll_vr_events(self):
        """
        Used to poll VR events and find any new tracked devices or ones that are no longer tracked.
        """
        event = openvr.VREvent_t()
        while self.vrsystem.pollNextEvent(event):
            if event.eventType == openvr.VREvent_TrackedDeviceActivated:
                self.add_tracked_device(event.trackedDeviceIndex)
            elif event.eventType == openvr.VREvent_TrackedDeviceDeactivated:
                #If we were already tracking this device, quit tracking it.
                if event.trackedDeviceIndex in self.device_index_map:
                    self.remove_tracked_device(event.trackedDeviceIndex)

    def add_tracked_device(self, tracked_device_index):
        i = tracked_device_index
        device_class = self.vr.getTrackedDeviceClass(i)
        if (device_class == openvr.TrackedDeviceClass_Controller):
            device_name = "controller_"+str(len(self.object_names["Controller"])+1)
            self.object_names["Controller"].append(device_name)
            self.devices[device_name] = vr_tracked_device(self.vr,i,"Controller")
            self.device_index_map[i] = device_name
        elif (device_class == openvr.TrackedDeviceClass_HMD):
            device_name = "hmd_"+str(len(self.object_names["HMD"])+1)
            self.object_names["HMD"].append(device_name)
            self.devices[device_name] = vr_tracked_device(self.vr,i,"HMD")
            self.device_index_map[i] = device_name
        elif (device_class == openvr.TrackedDeviceClass_GenericTracker):
            device_name = "tracker_"+str(len(self.object_names["Tracker"])+1)
            self.object_names["Tracker"].append(device_name)
            self.devices[device_name] = vr_tracked_device(self.vr,i,"Tracker")
            self.device_index_map[i] = device_name
        elif (device_class == openvr.TrackedDeviceClass_TrackingReference):
            device_name = "tracking_reference_"+str(len(self.object_names["Tracking Reference"])+1)
            self.object_names["Tracking Reference"].append(device_name)
            self.devices[device_name] = vr_tracking_reference(self.vr,i,"Tracking Reference")
            self.device_index_map[i] = device_name

    def remove_tracked_device(self, tracked_device_index):
        if tracked_device_index in self.device_index_map:
            device_name = self.device_index_map[tracked_device_index]
            self.object_names[self.devices[device_name].device_class].remove(device_name)
            del self.device_index_map[tracked_device_index]
            del self.devices[device_name]
        else:
            raise Exception("Tracked device index {} not valid. Not removing.".format(tracked_device_index))

    def rename_device(self,old_device_name,new_device_name):
        self.devices[new_device_name] = self.devices.pop(old_device_name)
        for i in range(len(self.object_names[self.devices[new_device_name].device_class])):
            if self.object_names[self.devices[new_device_name].device_class][i] == old_device_name:
                self.object_names[self.devices[new_device_name].device_class][i] = new_device_name

    def print_discovered_objects(self):
        for device_type in self.object_names:
            plural = device_type
            if len(self.object_names[device_type])!=1:
                plural+="s"
            print("Found "+str(len(self.object_names[device_type]))+" "+plural)
            for device in self.object_names[device_type]:
                if device_type == "Tracking Reference":
                    print("  "+device+" ("+self.devices[device].get_serial()+
                          ", Mode "+self.devices[device].get_model()+
                          ", "+self.devices[device].get_model()+
                          ")")
                else:
                    print("  "+device+" ("+self.devices[device].get_serial()+
                          ", "+self.devices[device].get_model()+")")

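All of the pose-query methods above accept an optional precomputed pose list, so one `getDeviceToAbsoluteTrackingPose` call per iteration can serve every tracked device. A minimal polling sketch using that option (an illustration, not part of the library):
```python
import time

import triad_openvr

v = triad_openvr.triad_openvr()
v.print_discovered_objects()

while True:
    v.poll_vr_events()        # pick up devices that appear or disappear
    pose = v.get_pose()       # one OpenVR pose query shared by every device below
    for name, device in v.devices.items():
        euler = device.get_pose_euler(pose=pose)
        if euler is not None:  # the pose can be invalid, e.g. for an occluded device
            print(name, ["%.3f" % value for value in euler])
    time.sleep(1 / 60)
```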

@@ -0,0 +1,30 @@
import triad_openvr
import time
import sys
import struct
import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_address = ('10.0.1.48', 8051)

v = triad_openvr.triad_openvr()
v.print_discovered_objects()

if len(sys.argv) == 1:
    interval = 1/250
elif len(sys.argv) == 2:
    interval = 1/float(sys.argv[1])
else:
    print("Invalid number of arguments")
    interval = False

if interval:
    while(True):
        start = time.time()
        txt = ""
        data = v.devices["tracker_1"].get_pose_quaternion()
        sent = sock.sendto(struct.pack('d'*len(data), *data), server_address)
        print("\r" + txt, end="")
        sleep_time = interval-(time.time()-start)
        if sleep_time>0:
            time.sleep(sleep_time)

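For a quick check of the stream without Unity, a matching receiver can also be written in Python; this sketch assumes the same port (8051) and the seven-double payload (x, y, z, r_w, r_x, r_y, r_z) packed by the emitter above.
```python
import socket
import struct

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("0.0.0.0", 8051))            # same port the emitter sends to

while True:
    payload, addr = sock.recvfrom(1024)
    if len(payload) == 7 * 8:           # x, y, z, r_w, r_x, r_y, r_z as doubles
        x, y, z, r_w, r_x, r_y, r_z = struct.unpack("7d", payload)
        print("%s  pos=(%.3f, %.3f, %.3f)  quat=(%.3f, %.3f, %.3f, %.3f)"
              % (addr[0], x, y, z, r_w, r_x, r_y, r_z))
```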

@@ -0,0 +1,58 @@
//The following code can be used to receive pose data from udp_emitter.py and use it to track an object in unity
using UnityEngine;
using System;
using System.Net;
using System.Net.Sockets;
using System.Threading;

public class udp_tracked_object : MonoBehaviour {

    Thread receiveThread;
    UdpClient client;
    private Double[] float_array;
    private int port = 8051;

    // Use this for initialization
    void Start () {
        float_array = new Double[7];
        receiveThread = new Thread(new ThreadStart(ReceiveData));
        receiveThread.IsBackground = true;
        receiveThread.Start();
    }

    // Update is called once per frame
    void Update () {
        transform.position = new Vector3((float) float_array[0], (float)float_array[1], (float)float_array[2]);
        transform.rotation = new Quaternion((float)float_array[3], (float)float_array[4], (float)float_array[5], (float)float_array[6]);
    }

    void OnApplicationQuit()
    {
        if (receiveThread != null)
            receiveThread.Abort();
        client.Close();
    }

    // receive thread
    private void ReceiveData()
    {
        port = 8051;
        client = new UdpClient(port);
        print("Starting Server");
        while (true)
        {
            try
            {
                IPEndPoint anyIP = new IPEndPoint(IPAddress.Any, 0);
                byte[] data = client.Receive(ref anyIP);
                // each received value is an 8-byte double, so step through the buffer in 8-byte chunks
                for (int i = 0; i < data.Length / 8; i++)
                    float_array[i] = BitConverter.ToDouble(data, i * 8);
            }
            catch (Exception err)
            {
                print(err.ToString());
            }
        }
    }
}

Docs/Image/15灯基站.png: new binary image file (103 KiB; not shown)

Docs/Image/9灯基站.png: new binary image file (102 KiB; not shown)

[11 more binary image files added (previews not shown; 268 KiB to 1.2 MiB)]

Docs/Image/Tracker3.0.png: new binary image file (485 KiB; not shown)

[10 more binary image files added (previews not shown; 30 KiB to 1.0 MiB)]


@@ -0,0 +1,120 @@
# Lighthouse Positioning Principles
In 2016 Valve and HTC jointly released the era-defining VR headset HTC VIVE, and with it a brand-new spatial tracking technology: the Lighthouse positioning system. Highly accurate, fast to respond, and easy to set up, it is still the first choice for full-body-tracking VR users today. This article analyzes the first-generation system (the 1.0 base stations) in detail and gives a partial analysis of the second-generation system (the 2.0 base stations released in June 2017): how the base stations work and how the trackers compute their position. For brevity, "1.0 base station" refers to the first-generation system and "2.0 base station" to the second generation.
## 1. How Lighthouse Positioning Works
### 1.1 Lighthouse Positioning Architecture
Each 1.0 base station contains an infrared LED array (the sync flash) and two motor-driven laser emitters. One scan cycle of the system lasts 20 ms. The base station's positioning principle is illustrated below:
<img src="./Image/positioning-principle.png" style="zoom:80%;" />
One positioning cycle proceeds as follows. (1) The base station's infrared LED array flashes once, emitting a burst of infrared sync light that covers the whole scanning volume. When a group of photosensors on a tracker (each tracker carries several groups) detects the sync flash, the photoelectric conversion chip on that group starts all sensors and the inertial measurement unit (IMU) and zeroes their data. (2) The base station's X-axis motor sweeps a laser line horizontally, rotating for 10 ms and flashing once, then stops. (3) The infrared LED array flashes a second time; once the tracker detects it, it prepares to capture the Y-axis sweep. (4) The Y-axis motor sweeps a laser line vertically, rotating for 10 ms and flashing once, then stops. The cycle ends when the LED array flashes again at the start of the next cycle, as shown below:
![](./Image/Lighthouse扫描过程.webp)
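To make the timing arithmetic concrete, here is a minimal sketch (not from this repository) that turns sync-flash and sweep-hit timestamps into sweep angles, using the 20 ms cycle and 10 ms-per-axis sweep described above; the assumption that a sweep covers 180° in those 10 ms, and the helper name `sweep_angle`, are illustrative.
```python
import math

CYCLE_S = 0.020            # one full scan cycle, as described above
SWEEP_S = 0.010            # each axis sweeps for 10 ms
SWEEP_RANGE_RAD = math.pi  # assume the rotor covers 180 degrees during one sweep

def sweep_angle(t_sync, t_hit):
    """Angle of the rotating laser plane when it crossed the sensor.

    t_sync: time the sync flash was seen (seconds)
    t_hit:  time the laser sweep crossed the sensor (seconds)
    """
    dt = t_hit - t_sync
    if not 0.0 <= dt <= SWEEP_S:
        raise ValueError("hit outside the expected sweep window")
    return dt / SWEEP_S * SWEEP_RANGE_RAD

# Example: a sensor hit 5 ms after the X sync flash and 2.5 ms after the Y sync flash
alpha = sweep_angle(0.0, 0.005)                        # horizontal sweep angle
beta = sweep_angle(CYCLE_S / 2, CYCLE_S / 2 + 0.0025)  # vertical sweep angle
print(math.degrees(alpha), math.degrees(beta))         # 90.0 45.0
```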
### 1.2 Base Station Operating Modes
A 1.0 base station has three operating modes, switched with a button: A (standalone / wired), and B and C (wireless). With a single base station in the room, either mode A or mode B works on its own. With two base stations there are two options. In wired mode, one station is set to A and the other to B, and the A station is the master: a sync cable connects the two stations, and the B station synchronizes its scan cycle over the cable, aligning its LED-array flash timing, motor speed, and laser-emitter timing with the A station. In wireless mode, one station is set to B and the other to C, and the B station is the master: a photosensor group on the C station detects the B station's infrared sync flashes and, by analyzing their frequency over time, locks its scan cycle (LED flash timing, motor speed, and laser timing) to the B station. When two base stations are running, the tracker decides which station's lasers to use: the base station whose light is simultaneously seen by the most photosensor groups is preferred in the fusion computation.
## 2. Lighthouse Base Station Teardown
### 2.1 Base Station Panel Teardown
The 1.0 base station comes in two models, a 9-LED and a 15-LED version (the 15-LED version was produced first; stations built after 2017 are mostly 9-LED). The 9-LED station has 9 LEDs in its infrared sync array and the 15-LED station has 15. The only difference is the number of LEDs in the sync array; the positioning principle and performance are identical, with the 9-LED array using brighter LEDs.
<img src="./Image/15灯基站.png" style="zoom:33%;" />
<center>A 15-LED base station in operation</center>
<img src="./Image/9灯基站.png" style="zoom:33%;" />
<center>A 9-LED base station in operation</center>
Devices built around the TS3633 chip, such as the Tracker 1.0, perform infrared sync identification with the base station: they only use captured infrared light for positioning once it is recognized as a base station's sync flash. Later trackers that switched to the TS4231 or TS4631 chip (Tracker 2018, Tracker 3.0, the Vive Pro headset, the Vive 2.0 controllers, and so on) dropped the infrared sync mode. When such a tracker runs with 1.0 base stations it starts all sensors as soon as it sees infrared light, but it cannot tell whether that light really came from a base station. If other infrared emitters are present in the environment, the tracker may mistake them for sync flashes and compute a wrong pose, producing the familiar "interference" tracking loss.
<img src="./Image/基站拆解图.png" alt="基站拆解图" style="zoom:67%;" />
### 2.2 Tracker 1.0 Principle and Teardown
Every tracker is covered with several groups of photosensors and photoelectric converters. After a sensor group has captured the two infrared sync flashes, it captures the two laser sweeps in turn. The photoelectric converter on that group (a TS3633, for example) then computes the angle and arrival time of the X-axis and Y-axis sweeps at that group. These times correspond exactly to the moments the base station's X and Y motors rotate past particular positions with the laser on: the base station is stationary, the motor speed and laser timing are known (each motor sweeps for 10 ms per cycle and flashes once), and the positions of the photosensors on the tracker are also known, so each group yields a set of time differences that is sent to the microcontroller and FPGA. The tracker combines the time differences from all groups, fuses them with the IMU, and sends the result to the PC, which yields the tracker's absolute position and trajectory (a simplified angle-to-ray sketch follows the teardown images below). The Tracker 1.0 teardown is shown here:
<img src="./Image/Tracker拆解图.png" alt="Tracker拆解图" style="zoom:67%;" />
<center>The photosensor and photoelectric-converter groups covering the Tracker 1.0</center>
<img src="./Image/Tracker1.0 Mainboard.png" style="zoom: 67%;" />
<center>Tracker 1.0 mainboard</center>
<img src="./Image/Tracker1.0主板2.png" style="zoom: 67%;" />
<center>Tracker 1.0 mainboard</center>
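As promised above, here is a simplified sketch (an illustration, not the vendor algorithm) of how the two sweep angles from one base station define a ray to a sensor; the axis convention, with 90° meaning straight ahead, is an assumption.
```python
import math

def ray_from_sweeps(alpha, beta):
    """Unit direction from the base station toward one sensor.

    alpha: horizontal (X) sweep angle in radians, 0..pi
    beta:  vertical (Y) sweep angle in radians, 0..pi
    Assumed convention: alpha = beta = pi/2 points straight ahead.
    """
    # The X sweep confines the sensor to a vertical plane and the Y sweep to a
    # horizontal plane; their intersection is a ray leaving the station.
    x = math.tan(alpha - math.pi / 2)  # left/right offset per unit of depth
    y = math.tan(beta - math.pi / 2)   # up/down offset per unit of depth
    z = 1.0                            # unit depth along the station's forward axis
    norm = math.sqrt(x * x + y * y + z * z)
    return (x / norm, y / norm, z / norm)

# A sensor hit at alpha = 100 deg, beta = 85 deg lies slightly to the right of
# and below the station's forward axis under this convention.
print(ray_from_sweeps(math.radians(100), math.radians(85)))
```
Because several sensors sit at known positions on the tracker's body, the collection of rays (or sweep angles) over-determines the rigid-body pose, which is what the microcontroller/FPGA solve and the IMU fusion refines.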
### 2.3 Base Station 2.0 Structure and Principle
The second-generation base station replaces the two motors plus infrared LED array with a single motor carrying two laser emitters, and drops the infrared sync flash entirely. The two emitters sit at slightly different positions (see below) and fire at slightly different times, and every laser pulse carries encoded data, including an identification code. When a tracker built around the TS4231 or TS4631 chip (Tracker 2018, Tracker 3.0, the Vive Pro headset, the Vive 2.0 controllers, and so on) receives light from a 2.0 base station, it can lock on quickly, enter tracking directly, and identify which base station each captured pulse came from.
![](./Image/基站2.0原理1.png)
<center>Single motor carrying two laser emitters (image from the web)</center>
<img src="./Image/基站2.0原理拆解.png" alt="基站2.0原理拆解" style="zoom:67%;" />
<center>Single motor carrying two laser emitters (image from the web)</center>
Scanning process of a 2.0 base station: the motor spins up to its set speed, and each time the two emitters rotate past a given position they fire a few nanoseconds apart, sweeping the X- and Y-axis lasers as a V-shaped pair of fans. One scan cycle runs from one pair of flashes until the emitters next reach that position and flash again, as shown below:
<img src="./Image/基站2.0扫描过程.webp" style="zoom:67%;" />
<center>A 2.0 base station in operation (image from the web)</center>
Because the infrared sync flash is replaced by laser-borne data, each 2.0 base station can run independently; with channels configured, up to 16 base stations can in theory operate at once, and start-up and recovery are faster than with 1.0 base stations. The trade-off is that older trackers based on the TS3633 chip, which cannot decode the modulated laser (Tracker 1.0, the original Vive headset, the Vive 1.0 controllers, and so on), are no longer supported. **The single-motor design also has drawbacks.** A motor carrying two laser emitters and spinning at very high speed wears out more easily; if an emitter ages or a motor's speed drifts on any one station, the stability of the whole system suffers. And because the stations run independently without mutual synchronization, the laser timing of different stations drifts apart more easily. When several stations share one space and are placed too close together or all in the same plane (so their scan regions overlap heavily), a tracker can become confused about which direction each laser burst came from, again producing the familiar "interference" tracking loss.
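The following is a simplified geometric model of how a single rotor with two tilted fans can recover both angles; it is not taken from Valve documentation, and the tilt value is only a commonly reported figure. Model each fan as a plane through the rotor center whose normal at rotor angle $a$ is $(-\sin a\cos\varphi,\ \cos a\cos\varphi,\ \pm\sin\varphi)$, with the two fans tilted by $+\varphi$ and $-\varphi$ (roughly 30° is often quoted for 2.0 base stations). A sensor in direction $(\cos\varepsilon\cos\theta,\ \cos\varepsilon\sin\theta,\ \sin\varepsilon)$, at azimuth $\theta$ and elevation $\varepsilon$, is swept when its dot product with the fan normal vanishes, which gives rotor hit angles $a_1$ and $a_2$ satisfying:
$$
a_{1,2} = \theta \pm \arcsin\bigl(\tan\varepsilon\,\tan\varphi\bigr)
\quad\Longrightarrow\quad
\theta = \frac{a_1 + a_2}{2},
\qquad
\varepsilon = \arctan\!\left(\frac{\sin\bigl((a_1 - a_2)/2\bigr)}{\tan\varphi}\right).
$$
Since every pulse also identifies its base station, the tracker can apply this per station and then fuse the angles with the IMU exactly as in the 1.0 case.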
### 2.4 Tracker Positioning with 2.0 Base Stations
Every tracker is covered with several groups of photosensors and photoelectric converters. When a sensor group captures the two laser fans from a base station, the photoelectric converter on that group (a TS4231 or TS4631) decodes the modulated laser data and computes the angle and arrival time of the X-axis and Y-axis sweeps at that group. These times correspond exactly to the moments the two emitters rotate past particular positions and the angles at which they fire: the base station is stationary, the motor speed and emitter timing are known, and the positions of the photosensors on the tracker are known, so each group again yields a set of time differences for the microcontroller and FPGA. The tracker combines the time differences from all groups, fuses them with the IMU, and sends the result to the PC, yielding the tracker's absolute position and trajectory.
<img src="./Image/Tracker2.0主板.png" style="zoom:67%;" />
<center>The photosensor and photoelectric-converter groups covering the Tracker 2.0 (image from the web)</center>
The Tracker 2018 teardown is shown below:
<img src="./Image/Tracker2.0主板1.png" style="zoom:67%;" />
<center>The TS4231 on the Tracker 2018 raises the refresh rate from the previous TS3633's 60 Hz to 100 Hz</center>
<img src="./Image/Tracker2.0主板2.png" style="zoom:67%;" />
<center>Tracker 2018 mainboard</center>
<img src="./Image/Tracker2.0主板3.png" style="zoom:67%;" />
<center>The Tracker 2018 uses an ATSAMG55 microcontroller</center>
### 2.5 Tracker 3.0 Teardown
The Tracker 3.0 shrinks the body while keeping the same number of photosensor groups.
<img src="./Image/Tracker3.0.png" style="zoom:67%;" />
<img src="./Image/Tracker3.0-光敏模组.png" style="zoom:67%;" />
<center>The TS4631 chips on the Tracker 3.0</center>
<img src="./Image/Tracker3.0-主板.png" style="zoom:67%;" />
<center>Tracker 3.0 mainboard</center>
<img src="./Image/Tracker3.0-主板2.png" style="zoom:67%;" />
<center>Tracker 3.0 mainboard</center>
<img src="./Image/Tracker3.0-手柄TS4631.png" style="zoom:67%;" />
<center>Close-up of the TS4631 on an Index controller</center>
<img src="./Image/TundraTracker-TS4631.png" style="zoom:67%;" />
<center>The N7R appears to use the same solution as HTC, yet does not perform as well</center>

README.md Normal file

@@ -0,0 +1,5 @@
# Lighthouse-UESTC
Description: this project uses Valve's high-precision laser tracking system, Lighthouse, to evaluate the error of other, lower-precision positioning devices. The document describing how Lighthouse works is at `Docs\Lighthouse工作原理.md`.
1.