diff --git a/README.md b/README.md index 8c43937..4ccfe82 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,14 @@ # Overview This project implements a WebRTC service for the Comma 3 which means that it aims to transmit the video feeds as fast as possible. + +## uinput + +/etc/udev/rules.d/10-allow-uinput.rules + +``` +# uncomment in case of: +# evdev.uinput.UInputError: "/dev/uinput" cannot be opened for writing +# see https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=827240 +#KERNEL=="uinput", SUBSYSTEM=="misc", OPTIONS+="static_node=uinput", TAG+="uaccess", GROUP="input", MODE="0660" +``` \ No newline at end of file diff --git a/desktop_stream_track.py b/desktop_stream_track.py index fbcf7ca..c698518 100755 --- a/desktop_stream_track.py +++ b/desktop_stream_track.py @@ -5,6 +5,12 @@ from aiortc import VideoStreamTrack import Xlib import Xlib.display import os +import pyautogui +import numpy +import evdev +import keymap + +pyautogui.FAILSAFE = False # https://ffmpeg.org/ffmpeg-devices.html#x11grab class DesktopStreamTrack(VideoStreamTrack): @@ -18,6 +24,7 @@ class DesktopStreamTrack(VideoStreamTrack): 'video_size': str(self.resolution.width) + "x" + str(self.resolution.height) } self.container = av.open(':0', format='x11grab', options=options) + self.ui = evdev.UInput() async def recv(self): pts, time_base = await self.next_timestamp() @@ -29,6 +36,33 @@ class DesktopStreamTrack(VideoStreamTrack): frame.time_base = time_base return frame + def handle_message(self, data): + if data["action"] == "mousemove": + x = numpy.interp(data["cursorPositionX"], (0, data["displayWidth"]), (0, self.resolution.width)) + y = numpy.interp(data["cursorPositionY"], (0, data["displayHeight"]), (0, self.resolution.height)) + pyautogui.moveTo(x, y, _pause=False) + elif data["action"] == "joystick": + x = numpy.interp(data["x"], (-38, 38), (0, self.resolution.width)) + y = numpy.interp(data["y"], (-38, 38), (self.resolution.height, 0)) + print(f'{data["y"]} {self.resolution.height} 
{y}') + pyautogui.moveTo(x, y, _pause=False) + elif data["action"] == "click": + pyautogui.click() + elif data["action"] == "rightclick": + pyautogui.rightClick() + elif data["action"] == "keyboard": + try: + keymap.reload() + osKey = keymap.iOStoLinux[data["key"]] + self.ui.write(evdev.ecodes.EV_KEY, osKey, data["direction"]) + self.ui.syn() + except KeyError: + print(f"Unknown key: {data['key']}") + + def stop(self) -> None: + super().stop() + self.ui.close() + if __name__ == "__main__": from time import time_ns import sys diff --git a/keymap.py b/keymap.py new file mode 100644 index 0000000..74f20dc --- /dev/null +++ b/keymap.py @@ -0,0 +1,95 @@ +from evdev import ecodes as e + +# Some info can be found here +# https://www.kernel.org/doc/Documentation/input/event-codes.txt + +# The full list of key codes for linux can be found here: +# https://github.com/torvalds/linux/blob/master/include/uapi/linux/input-event-codes.h + +# I could not find a good list for iOS so I just tapped the keys one by one. +# I only have access to the iPad Folio keyboard so those are the only keys mapped. + +# Also, my caps lock is mapped to escape in the iOS settings, so no caps lock is mapped. 
+# This keyboard lacks a physical escape key (but the capslock map is a good consolation) + +# Map of iOS to Linux key codes +iOStoLinux = { + # Row 1 of 5 + 53: e.KEY_GRAVE, + 30: e.KEY_1, + 31: e.KEY_2, + 32: e.KEY_3, + 33: e.KEY_4, + 34: e.KEY_5, + 35: e.KEY_6, + 36: e.KEY_7, + 37: e.KEY_8, + 38: e.KEY_9, + 39: e.KEY_0, + 45: e.KEY_MINUS, + 46: e.KEY_EQUAL, + 42: e.KEY_BACKSPACE, + + # Row 2 of 5 + 43: e.KEY_TAB, + 20: e.KEY_Q, + 26: e.KEY_W, + 8: e.KEY_E, + 21: e.KEY_R, + 23: e.KEY_T, + 28: e.KEY_Y, + 24: e.KEY_U, + 12: e.KEY_I, + 18: e.KEY_O, + 19: e.KEY_P, + 47: e.KEY_LEFTBRACE, + 48: e.KEY_RIGHTBRACE, + 49: e.KEY_BACKSLASH, + + # Row 3 of 5 + 41: e.KEY_ESC, + 4: e.KEY_A, + 22: e.KEY_S, + 7: e.KEY_D, + 9: e.KEY_F, + 10: e.KEY_G, + 11: e.KEY_H, + 13: e.KEY_J, + 14: e.KEY_K, + 15: e.KEY_L, + 51: e.KEY_SEMICOLON, + 52: e.KEY_APOSTROPHE, + 40: e.KEY_ENTER, + + # Row 4 of 5 + 225: e.KEY_LEFTSHIFT, + 29: e.KEY_Z, + 27: e.KEY_X, + 6: e.KEY_C, + 25: e.KEY_V, + 5: e.KEY_B, + 17: e.KEY_N, + 16: e.KEY_M, + 54: e.KEY_COMMA, + 55: e.KEY_DOT, + 56: e.KEY_SLASH, + 229: e.KEY_RIGHTSHIFT, + + # Row 5 of 5 + 224: e.KEY_LEFTCTRL, + 226: e.KEY_LEFTALT, + 227: e.KEY_LEFTMETA, + 44: e.KEY_SPACE, + 231: e.KEY_RIGHTMETA, + 230: e.KEY_RIGHTALT, + 80: e.KEY_LEFT, + 82: e.KEY_UP, + 81: e.KEY_DOWN, + 79: e.KEY_RIGHT +} + +# Useful while creating the mapping... 
+import importlib +import sys +def reload(): +    importlib.reload(sys.modules[__name__]) \ No newline at end of file diff --git a/secureput/secureput_signaling.py b/secureput/secureput_signaling.py index b577a1f..111bc38 100644 --- a/secureput/secureput_signaling.py +++ b/secureput/secureput_signaling.py @@ -62,7 +62,6 @@ class SecureputSignaling(WebsocketSignaling): async def send(self, descr): data = self.__object_to_string(descr) - print("Websocket send: %s" % data) await self._websocket.send(data + '\n') async def receive(self): diff --git a/server.py b/server.py index 6e4c5f3..f0ec89a 100755 --- a/server.py +++ b/server.py @@ -11,8 +11,7 @@ from compressed_vipc_track import VisionIpcTrack from desktop_stream_track import DesktopStreamTrack from aiortc.contrib.signaling import BYE from secureput.secureput_signaling import SecureputSignaling -import pyautogui -import numpy + from aiortc.contrib.media import MediaBlackhole # optional, for better performance @@ -40,7 +39,6 @@ async def heap_snapshot(): print(stat) await asyncio.sleep(10) -pyautogui.FAILSAFE = False cams = ["roadEncodeData","wideRoadEncodeData","driverEncodeData"] cam = 2 @@ -90,13 +88,8 @@ async def signal(pc, signaling): @channel.on('message') async def on_message(message): data = json.loads(message) - if data["action"] == "mousemove" and desktop_track != None: - pyautogui.moveTo(data["x"], data["y"], _pause=False) - if data["action"] == "joystick" and desktop_track != None: - x = numpy.interp(data["x"], (-40, 40), (0, desktop_track.resolution.width)) - y = numpy.interp(data["y"], (-40, 40), (desktop_track.resolution.height, 0)) - print(f'{data["y"]} {desktop_track.resolution.height} {y}') - pyautogui.moveTo(x, y, _pause=False) + if desktop_track: + desktop_track.handle_message(data) @pc.on("track") def on_track(track): @@ -142,9 +135,9 @@ if __name__ == "__main__": pc = RTCPeerConnection(configuration=RTCConfiguration([RTCIceServer(args.stun_server)])) signaling = 
SecureputSignaling(args.signaling_server) - # coro = signal(pc, signaling) + coro = signal(pc, signaling) # coro = asyncio.gather(watchdog.check_memory(), signal(pc, signaling)) - coro = asyncio.gather(heap_snapshot(), signal(pc, signaling)) + # coro = asyncio.gather(heap_snapshot(), signal(pc, signaling)) # run event loop