Compare commits

..

15 Commits

Author SHA1 Message Date
Lukas Herman
f396092609 Default high profile for x264 when possible 2020-11-01 01:08:03 -07:00
Lukas Herman
ee6cf08c44 Use miniaudio in favor of implementing ourselves
miniaudio supports various backends. This will help reducing code
surface
2020-11-01 00:48:54 -07:00
Lukas Herman
6a211aa19f Use x264 as default in example 2020-11-01 00:38:42 -07:00
Lukas Herman
b089610c27 Fix incorrect audio latency 2020-11-01 00:32:20 -07:00
Lukas Herman
1d34ec9c5d Fix broken vpx example 2020-10-31 23:55:46 -07:00
Lukas Herman
7bd3efc8b7 Fix broken conditional build 2020-10-31 11:19:02 -07:00
Lukas Herman
8396fd7aac Add an end-to-end benchmark 2020-10-31 10:35:53 -07:00
Lukas Herman
3787158dba WIP 2020-10-31 01:12:14 -07:00
Lukas Herman
640eeb0cc0 Fix panic when printing non-reference types 2020-10-30 17:50:55 -07:00
Lukas Herman
16ceb45c25 Replace <nil> -> any in prop pretty format 2020-10-30 17:42:07 -07:00
Lukas Herman
c98b3b0909 Enhance driver discovery logging 2020-10-30 17:32:40 -07:00
Lukas Herman
e6c98a844f Remove unused fmt 2020-10-30 10:01:07 -07:00
Lukas Herman
2a70c031b8 Remove unwanted logging 2020-10-30 09:04:51 -07:00
Lukas Herman
047013be95 Fix division by 0 when sample rate is not filled 2020-10-30 08:58:46 -07:00
Lukas Herman
765318feb6 Fix webrtc example 2020-10-30 01:19:03 -07:00
13 changed files with 358 additions and 534 deletions

View File

@@ -9,19 +9,21 @@ import (
"github.com/pion/mediadevices/pkg/prop"
"github.com/pion/webrtc/v2"
// If you don't like vpx, you can also use x264 by importing as below
// "github.com/pion/mediadevices/pkg/codec/x264" // This is required to use h264 video encoder
// If you don't like x264, you can also use vpx by importing as below
// "github.com/pion/mediadevices/pkg/codec/vpx" // This is required to use VP8/VP9 video encoder
// or you can also use openh264 for alternative h264 implementation
// "github.com/pion/mediadevices/pkg/codec/openh264"
"github.com/pion/mediadevices/pkg/codec/openh264" // This is required to use VP8/VP9 video encoder
"github.com/pion/mediadevices/pkg/codec/opus" // This is required to use VP8/VP9 video encoder
// or if you use a raspberry pi like, you can use mmal for using its hardware encoder
// "github.com/pion/mediadevices/pkg/codec/mmal"
"github.com/pion/mediadevices/pkg/codec/opus" // This is required to use opus audio encoder
"github.com/pion/mediadevices/pkg/codec/x264" // This is required to use h264 video encoder
// Note: If you don't have a camera or microphone or your adapters are not supported,
// you can always swap your adapters with our dummy adapters below.
// _ "github.com/pion/mediadevices/pkg/driver/videotest"
// _ "github.com/pion/mediadevices/pkg/driver/audiotest"
_ "github.com/pion/mediadevices/pkg/driver/audiotest"
_ "github.com/pion/mediadevices/pkg/driver/camera" // This is required to register camera adapter
_ "github.com/pion/mediadevices/pkg/driver/camera" // This is required to register camera adapter
_ "github.com/pion/mediadevices/pkg/driver/microphone" // This is required to register microphone adapter
)
const (
@@ -42,7 +44,23 @@ func main() {
signal.Decode(signal.MustReadStdin(), &offer)
// Create a new RTCPeerConnection
x264Params, err := x264.NewParams()
if err != nil {
panic(err)
}
x264Params.BitRate = 500_000 // 500kbps
opusParams, err := opus.NewParams()
if err != nil {
panic(err)
}
codecSelector := mediadevices.NewCodecSelector(
mediadevices.WithVideoEncoders(&x264Params),
mediadevices.WithAudioEncoders(&opusParams),
)
mediaEngine := webrtc.MediaEngine{}
codecSelector.Populate(&mediaEngine)
if err := mediaEngine.PopulateFromSDP(offer); err != nil {
panic(err)
}
@@ -58,21 +76,6 @@ func main() {
fmt.Printf("Connection State has changed %s \n", connectionState.String())
})
vp8Params, err := openh264.NewParams()
if err != nil {
panic(err)
}
vp8Params.BitRate = 300_000 // 300kbps
opusParams, err := opus.NewParams()
if err != nil {
panic(err)
}
codecSelector := mediadevices.NewCodecSelector(
mediadevices.WithVideoEncoders(&vp8Params),
mediadevices.WithAudioEncoders(&opusParams),
)
s, err := mediadevices.GetUserMedia(mediadevices.MediaStreamConstraints{
Video: func(c *mediadevices.MediaTrackConstraints) {
c.FrameFormat = prop.FrameFormat(frame.FormatYUY2)

1
go.mod
View File

@@ -6,6 +6,7 @@ require (
github.com/blackjack/webcam v0.0.0-20200313125108-10ed912a8539
github.com/jfreymuth/pulse v0.0.0-20201014123913-1e525c426c93
github.com/lherman-cs/opus v0.0.2
github.com/pion/logging v0.2.2
github.com/pion/webrtc/v2 v2.2.26
github.com/satori/go.uuid v1.2.0
golang.org/x/image v0.0.0-20200927104501-e162460cd6b5

View File

@@ -0,0 +1,11 @@
// Package logging provides a thin wrapper around pion's logging package so
// that all mediadevices components obtain loggers from one shared factory.
package logging

import (
	"github.com/pion/logging"
)

// loggerFactory is the process-wide factory backing every logger created
// through NewLogger.
var loggerFactory = logging.NewDefaultLoggerFactory()

// NewLogger returns a leveled logger for the given scope (e.g.
// "mediadevices/driver/microphone"), created from the shared factory.
func NewLogger(scope string) logging.LeveledLogger {
	return loggerFactory.NewLogger(scope)
}

7
logging.go Normal file
View File

@@ -0,0 +1,7 @@
package mediadevices

import (
	"github.com/pion/mediadevices/internal/logging"
)

// logger is the package-wide leveled logger for mediadevices, scoped as
// "mediadevices" via the shared internal logging factory.
var logger = logging.NewLogger("mediadevices")

View File

@@ -120,12 +120,15 @@ func queryDriverProperties(filter driver.FilterFn) map[driver.Driver][]prop.Medi
func selectBestDriver(filter driver.FilterFn, constraints MediaTrackConstraints) (driver.Driver, MediaTrackConstraints, error) {
var bestDriver driver.Driver
var bestProp prop.Media
var foundPropertiesLog []string
minFitnessDist := math.Inf(1)
foundPropertiesLog = append(foundPropertiesLog, "\n============ Found Properties ============")
driverProperties := queryDriverProperties(filter)
for d, props := range driverProperties {
priority := float64(d.Info().Priority)
for _, p := range props {
foundPropertiesLog = append(foundPropertiesLog, p.String())
fitnessDist, ok := constraints.MediaConstraints.FitnessDistance(p)
if !ok {
continue
@@ -139,26 +142,18 @@ func selectBestDriver(filter driver.FilterFn, constraints MediaTrackConstraints)
}
}
foundPropertiesLog = append(foundPropertiesLog, "=============== Constraints ==============")
foundPropertiesLog = append(foundPropertiesLog, constraints.String())
foundPropertiesLog = append(foundPropertiesLog, "================ Best Fit ================")
if bestDriver == nil {
var foundProperties []string
for _, props := range driverProperties {
for _, p := range props {
foundProperties = append(foundProperties, fmt.Sprint(&p))
}
}
err := fmt.Errorf(`%w:
============ Found Properties ============
%s
=============== Constraints ==============
%s
`, errNotFound, strings.Join(foundProperties, "\n\n"), &constraints)
return nil, MediaTrackConstraints{}, err
foundPropertiesLog = append(foundPropertiesLog, "Not found")
logger.Debug(strings.Join(foundPropertiesLog, "\n\n"))
return nil, MediaTrackConstraints{}, errNotFound
}
foundPropertiesLog = append(foundPropertiesLog, bestProp.String())
logger.Debug(strings.Join(foundPropertiesLog, "\n\n"))
constraints.selectedMedia = prop.Media{}
constraints.selectedMedia.MergeConstraints(constraints.MediaConstraints)
constraints.selectedMedia.Merge(bestProp)

View File

@@ -0,0 +1,82 @@
// +build e2e
package mediadevices
import (
"image"
"sync"
"testing"
"github.com/pion/mediadevices/pkg/codec/x264"
"github.com/pion/mediadevices/pkg/frame"
)
// mockVideoSource is a synthetic video source used by the end-to-end
// benchmark: instead of reading from real camera hardware it decodes raw
// YUY2 buffers recycled through a sync.Pool.
type mockVideoSource struct {
	width, height int
	// pool recycles raw frame buffers of width*height*2 bytes
	// (YUY2 packs 2 bytes per pixel).
	pool    sync.Pool
	decoder frame.Decoder
}
// newMockVideoSource builds a mock video source producing YUY2 frames of
// the given dimensions. It panics if the YUY2 decoder cannot be created,
// which indicates a programmer error rather than a runtime condition.
func newMockVideoSource(width, height int) *mockVideoSource {
	yuy2Decoder, decoderErr := frame.NewDecoder(frame.FormatYUY2)
	if decoderErr != nil {
		panic(decoderErr)
	}

	source := &mockVideoSource{
		width:   width,
		height:  height,
		decoder: yuy2Decoder,
	}
	source.pool = sync.Pool{
		New: func() interface{} {
			// YUY2 packs 2 bytes per pixel.
			return make([]byte, width*height*2)
		},
	}
	return source
}
// ID implements the source interface; the mock has no real device ID.
func (source *mockVideoSource) ID() string { return "" }
// Close implements the source interface; the mock holds no resources.
func (source *mockVideoSource) Close() error { return nil }
// Read returns one decoded frame backed by a pooled raw YUY2 buffer. The
// raw buffer is returned to the pool as soon as decoding finishes.
func (source *mockVideoSource) Read() (image.Image, func(), error) {
	rawFrame := source.pool.Get().([]byte)
	img, release, decodeErr := source.decoder.Decode(rawFrame, source.width, source.height)
	source.pool.Put(rawFrame)
	if decodeErr != nil {
		return nil, nil, decodeErr
	}
	return img, release, nil
}
// BenchmarkEndToEnd measures the full video pipeline: a mock 1080p YUY2
// source feeding a VideoTrack whose frames are pulled through the x264
// encoder. Each iteration reads and releases exactly one encoded frame.
func BenchmarkEndToEnd(b *testing.B) {
	params, err := x264.NewParams()
	if err != nil {
		b.Fatal(err)
	}
	params.BitRate = 300_000 // 300kbps

	videoSource := newMockVideoSource(1920, 1080)
	track := NewVideoTrack(videoSource, nil).(*VideoTrack)
	defer track.Close()

	reader := track.NewReader(false)
	inputProp, err := detectCurrentVideoProp(track.Broadcaster)
	if err != nil {
		b.Fatal(err)
	}

	encodedReader, err := params.BuildVideoEncoder(reader, inputProp)
	if err != nil {
		b.Fatal(err)
	}
	defer encodedReader.Close()

	for i := 0; i < b.N; i++ {
		_, release, err := encodedReader.Read()
		if err != nil {
			b.Fatal(err)
		}
		release()
	}
}

View File

@@ -47,7 +47,7 @@ Encoder *enc_new(x264_param_t param, char *preset, int *rc) {
e->param.b_repeat_headers = 1;
e->param.b_annexb = 1;
if (x264_param_apply_profile(&e->param, "baseline") < 0) {
if (x264_param_apply_profile(&e->param, "high") < 0) {
*rc = ERR_APPLY_PROFILE;
goto fail;
}
@@ -95,4 +95,4 @@ void enc_close(Encoder *e, int *rc) {
x264_encoder_close(e->h);
x264_picture_clean(&e->pic_in);
free(e);
}
}

View File

@@ -1 +1,204 @@
package microphone
import (
"encoding/binary"
"errors"
"fmt"
"io"
"time"
"unsafe"
"github.com/gen2brain/malgo"
"github.com/pion/mediadevices/internal/logging"
"github.com/pion/mediadevices/pkg/driver"
"github.com/pion/mediadevices/pkg/io/audio"
"github.com/pion/mediadevices/pkg/prop"
"github.com/pion/mediadevices/pkg/wave"
)
const (
	maxDeviceIDLength = 20
	// TODO: should replace this with a more flexible approach
	// sampleRateStep is the increment used when enumerating supported
	// sample rates in Properties.
	sampleRateStep    = 1000
	initialBufferSize = 1024
)

// logger is the scoped logger for this driver.
var logger = logging.NewLogger("mediadevices/driver/microphone")

// ctx is the shared miniaudio (malgo) context, initialized in init.
var ctx *malgo.AllocatedContext

// hostEndian is the byte order of the host CPU, detected in init.
var hostEndian binary.ByteOrder

var (
	errUnsupportedFormat = errors.New("the provided audio format is not supported")
)
// microphone is an audio capture driver backed by a malgo (miniaudio)
// device.
type microphone struct {
	malgo.DeviceInfo
	// chunkChan carries raw capture chunks from the malgo data callback
	// (see AudioRecord) to the reader; closed by Close to signal EOF.
	chunkChan chan []byte
}
// init creates the shared malgo context, registers every detected capture
// device with the driver manager, and detects the host byte order.
// Any failure here is fatal: the driver cannot operate without a context.
func init() {
	var err error
	/*
		backends := []malgo.Backend{
			malgo.BackendPulseaudio,
			malgo.BackendAlsa,
		}
	*/
	ctx, err = malgo.InitContext(nil, malgo.ContextConfig{}, func(message string) {
		logger.Debugf("%v\n", message)
	})
	if err != nil {
		panic(err)
	}

	devices, err := ctx.Devices(malgo.Capture)
	if err != nil {
		panic(err)
	}
	for _, device := range devices {
		// TODO: Detect default device and prioritize it
		driver.GetManager().Register(newMicrophone(device), driver.Info{
			Label:      device.ID.String(),
			DeviceType: driver.Microphone,
		})
	}

	// Decide which endian: store the bytes 0x12,0x34 and read them back
	// as a uint16 to observe the host's native byte order.
	switch v := *(*uint16)(unsafe.Pointer(&([]byte{0x12, 0x34}[0]))); v {
	case 0x1234:
		hostEndian = binary.BigEndian
	case 0x3412:
		hostEndian = binary.LittleEndian
	default:
		panic(fmt.Sprintf("failed to determine host endianness: %x", v))
	}
}
// newMicrophone wraps a malgo device description in a microphone driver.
func newMicrophone(info malgo.DeviceInfo) *microphone {
	return &microphone{
		DeviceInfo: info,
	}
}
// Open prepares the driver for capture by allocating the chunk channel.
// The malgo device itself is created later, in AudioRecord.
func (m *microphone) Open() error {
	m.chunkChan = make(chan []byte, 1)
	return nil
}
// Close stops the capture stream by closing the chunk channel, which
// makes the reader returned from AudioRecord return io.EOF. Calling Close
// more than once is safe.
func (m *microphone) Close() error {
	if m.chunkChan == nil {
		return nil
	}
	close(m.chunkChan)
	m.chunkChan = nil
	return nil
}
// AudioRecord starts capturing from the device using the negotiated
// properties and returns a reader that yields one decoded chunk per call.
//
// Supported formats are 32-bit float (F32) and 16-bit signed integer
// (S16) PCM; anything else is rejected with errUnsupportedFormat. The
// returned reader blocks until the malgo data callback delivers a chunk,
// and returns io.EOF — after stopping and uninitializing the device —
// once Close has closed the chunk channel.
func (m *microphone) AudioRecord(inputProp prop.Media) (audio.Reader, error) {
	var config malgo.DeviceConfig
	var callbacks malgo.DeviceCallbacks

	decoder, err := wave.NewDecoder(&wave.RawFormat{
		SampleSize:  inputProp.SampleSize,
		IsFloat:     inputProp.IsFloat,
		Interleaved: inputProp.IsInterleaved,
	})
	if err != nil {
		return nil, err
	}

	config.DeviceType = malgo.Capture
	config.PerformanceProfile = malgo.LowLatency
	config.Capture.Channels = uint32(inputProp.ChannelCount)
	config.SampleRate = uint32(inputProp.SampleRate)
	if inputProp.SampleSize == 4 && inputProp.IsFloat {
		config.Capture.Format = malgo.FormatF32
	} else if inputProp.SampleSize == 2 && !inputProp.IsFloat {
		config.Capture.Format = malgo.FormatS16
	} else {
		return nil, errUnsupportedFormat
	}

	onRecvChunk := func(_, chunk []byte, framecount uint32) {
		m.chunkChan <- chunk
	}
	callbacks.Data = onRecvChunk

	device, err := malgo.InitDevice(ctx.Context, config, callbacks)
	if err != nil {
		return nil, err
	}

	err = device.Start()
	if err != nil {
		return nil, err
	}

	return audio.ReaderFunc(func() (wave.Audio, func(), error) {
		chunk, ok := <-m.chunkChan
		if !ok {
			device.Stop()
			device.Uninit()
			return nil, func() {}, io.EOF
		}

		decodedChunk, err := decoder.Decode(hostEndian, chunk, inputProp.ChannelCount)
		if err != nil {
			// Previously the error was ignored and the result was
			// type-asserted unconditionally, which could panic.
			return nil, func() {}, err
		}

		// FIXME: the decoder should also fill this information.
		// Handle both formats this function configures above; the old
		// unconditional *wave.Float32Interleaved assertion panicked for
		// S16 capture devices.
		switch decoded := decodedChunk.(type) {
		case *wave.Float32Interleaved:
			decoded.Size.SamplingRate = inputProp.SampleRate
		case *wave.Int16Interleaved:
			decoded.Size.SamplingRate = inputProp.SampleRate
		}
		return decodedChunk, func() {}, nil
	}), nil
}
// Properties enumerates every media property combination this device
// reports: each channel count, each sample rate (stepped by
// sampleRateStep), and each supported sample format. A hardcoded
// fallback property is always appended (see FIXME below).
func (m *microphone) Properties() []prop.Media {
	logger.Debug("Querying properties")

	// miniaudio only uses the host endian.
	isBigEndian := hostEndian == binary.BigEndian

	var supportedProps []prop.Media
	for ch := m.MinChannels; ch <= m.MaxChannels; ch++ {
		for rate := m.MinSampleRate; rate <= m.MaxSampleRate; rate += sampleRateStep {
			for i := 0; i < int(m.FormatCount); i++ {
				p := prop.Media{
					Audio: prop.Audio{
						ChannelCount: int(ch),
						SampleRate:   int(rate),
						IsBigEndian:  isBigEndian,
						// miniaudio only supports interleaved at the moment
						IsInterleaved: true,
					},
				}
				switch malgo.FormatType(m.Formats[i]) {
				case malgo.FormatF32:
					p.SampleSize = 4
					p.IsFloat = true
				case malgo.FormatS16:
					p.SampleSize = 2
					p.IsFloat = false
				}
				supportedProps = append(supportedProps, p)
			}
		}
	}

	// FIXME: remove this hardcoded value. Malgo doesn't support "ma_context_get_device_info" API yet. The above iterations
	// will always return nothing as of now
	supportedProps = append(supportedProps, prop.Media{
		Audio: prop.Audio{
			Latency:       time.Millisecond * 20,
			ChannelCount:  1,
			SampleRate:    48000,
			SampleSize:    4,
			IsFloat:       true,
			IsBigEndian:   isBigEndian,
			IsInterleaved: true,
		},
	})
	return supportedProps
}

View File

@@ -1,138 +0,0 @@
package microphone
import (
"io"
"time"
"github.com/jfreymuth/pulse"
"github.com/pion/mediadevices/pkg/driver"
"github.com/pion/mediadevices/pkg/io/audio"
"github.com/pion/mediadevices/pkg/prop"
"github.com/pion/mediadevices/pkg/wave"
)
type microphone struct {
c *pulse.Client
id string
samplesChan chan<- []int16
}
func init() {
pa, err := pulse.NewClient()
if err != nil {
// No pulseaudio
return
}
defer pa.Close()
sources, err := pa.ListSources()
if err != nil {
panic(err)
}
defaultSource, err := pa.DefaultSource()
if err != nil {
panic(err)
}
for _, source := range sources {
priority := driver.PriorityNormal
if defaultSource.ID() == source.ID() {
priority = driver.PriorityHigh
}
driver.GetManager().Register(&microphone{id: source.ID()}, driver.Info{
Label: source.ID(),
DeviceType: driver.Microphone,
Priority: priority,
})
}
}
func (m *microphone) Open() error {
var err error
m.c, err = pulse.NewClient()
if err != nil {
return err
}
return nil
}
func (m *microphone) Close() error {
if m.samplesChan != nil {
close(m.samplesChan)
m.samplesChan = nil
}
m.c.Close()
return nil
}
func (m *microphone) AudioRecord(p prop.Media) (audio.Reader, error) {
var options []pulse.RecordOption
if p.ChannelCount == 1 {
options = append(options, pulse.RecordMono)
} else {
options = append(options, pulse.RecordStereo)
}
latency := p.Latency.Seconds()
src, err := m.c.SourceByID(m.id)
if err != nil {
return nil, err
}
options = append(options,
pulse.RecordSampleRate(p.SampleRate),
pulse.RecordLatency(latency),
pulse.RecordSource(src),
)
samplesChan := make(chan []int16, 1)
handler := func(b []int16) (int, error) {
samplesChan <- b
return len(b), nil
}
stream, err := m.c.NewRecord(pulse.Int16Writer(handler), options...)
if err != nil {
return nil, err
}
reader := audio.ReaderFunc(func() (wave.Audio, func(), error) {
buff, ok := <-samplesChan
if !ok {
stream.Close()
return nil, func() {}, io.EOF
}
a := wave.NewInt16Interleaved(
wave.ChunkInfo{
Channels: p.ChannelCount,
Len: len(buff) / p.ChannelCount,
SamplingRate: p.SampleRate,
},
)
copy(a.Data, buff)
return a, func() {}, nil
})
stream.Start()
m.samplesChan = samplesChan
return reader, nil
}
func (m *microphone) Properties() []prop.Media {
// TODO: Get actual properties
monoProp := prop.Media{
Audio: prop.Audio{
SampleRate: 48000,
Latency: time.Millisecond * 20,
ChannelCount: 1,
},
}
stereoProp := monoProp
stereoProp.ChannelCount = 2
return []prop.Media{monoProp, stereoProp}
}

View File

@@ -1,348 +0,0 @@
package microphone
import (
"errors"
"golang.org/x/sys/windows"
"io"
"time"
"unsafe"
"github.com/pion/mediadevices/pkg/driver"
"github.com/pion/mediadevices/pkg/io/audio"
"github.com/pion/mediadevices/pkg/prop"
"github.com/pion/mediadevices/pkg/wave"
)
const (
// bufferNumber * prop.Audio.Latency is the maximum blockable duration
// to get data without dropping chunks.
bufferNumber = 5
)
// Windows APIs
var (
winmm = windows.NewLazySystemDLL("Winmm.dll")
waveInOpen = winmm.NewProc("waveInOpen")
waveInStart = winmm.NewProc("waveInStart")
waveInStop = winmm.NewProc("waveInStop")
waveInReset = winmm.NewProc("waveInReset")
waveInClose = winmm.NewProc("waveInClose")
waveInPrepareHeader = winmm.NewProc("waveInPrepareHeader")
waveInAddBuffer = winmm.NewProc("waveInAddBuffer")
waveInUnprepareHeader = winmm.NewProc("waveInUnprepareHeader")
)
type buffer struct {
waveHdr
data []int16
}
func newBuffer(samples int) *buffer {
b := make([]int16, samples)
return &buffer{
waveHdr: waveHdr{
// Sharing Go memory with Windows C API without reference.
// Make sure that the lifetime of the buffer struct is longer
// than the final access from cbWaveIn.
lpData: uintptr(unsafe.Pointer(&b[0])),
dwBufferLength: uint32(samples * 2),
},
data: b,
}
}
type microphone struct {
hWaveIn windows.Pointer
buf map[uintptr]*buffer
chBuf chan *buffer
closed chan struct{}
}
func init() {
// TODO: enum devices
driver.GetManager().Register(&microphone{}, driver.Info{
Label: "default",
DeviceType: driver.Microphone,
})
}
func (m *microphone) Open() error {
m.chBuf = make(chan *buffer)
m.buf = make(map[uintptr]*buffer)
m.closed = make(chan struct{})
return nil
}
func (m *microphone) cbWaveIn(hWaveIn windows.Pointer, uMsg uint, dwInstance, dwParam1, dwParam2 *int32) uintptr {
switch uMsg {
case MM_WIM_DATA:
b := m.buf[uintptr(unsafe.Pointer(dwParam1))]
m.chBuf <- b
case MM_WIM_OPEN:
case MM_WIM_CLOSE:
close(m.chBuf)
}
return 0
}
func (m *microphone) Close() error {
if m.hWaveIn == nil {
return nil
}
close(m.closed)
ret, _, _ := waveInStop.Call(
uintptr(unsafe.Pointer(m.hWaveIn)),
)
if err := errWinmm[ret]; err != nil {
return err
}
// All enqueued buffers are marked done by waveInReset.
ret, _, _ = waveInReset.Call(
uintptr(unsafe.Pointer(m.hWaveIn)),
)
if err := errWinmm[ret]; err != nil {
return err
}
for _, buf := range m.buf {
// Detach buffers from waveIn API.
ret, _, _ := waveInUnprepareHeader.Call(
uintptr(unsafe.Pointer(m.hWaveIn)),
uintptr(unsafe.Pointer(&buf.waveHdr)),
uintptr(unsafe.Sizeof(buf.waveHdr)),
)
if err := errWinmm[ret]; err != nil {
return err
}
}
// Now, it's ready to free the buffers.
// As microphone struct still has reference to the buffers,
// they will be GC-ed once microphone is reopened or unreferenced.
ret, _, _ = waveInClose.Call(
uintptr(unsafe.Pointer(m.hWaveIn)),
)
if err := errWinmm[ret]; err != nil {
return err
}
<-m.chBuf
m.hWaveIn = nil
return nil
}
func (m *microphone) AudioRecord(p prop.Media) (audio.Reader, error) {
for i := 0; i < bufferNumber; i++ {
b := newBuffer(
int(uint64(p.Latency) * uint64(p.SampleRate) / uint64(time.Second)),
)
// Map the buffer by its data head address to restore access to the Go struct
// in callback function. Don't resize the buffer after it.
m.buf[uintptr(unsafe.Pointer(&b.waveHdr))] = b
}
waveFmt := &waveFormatEx{
wFormatTag: WAVE_FORMAT_PCM,
nChannels: uint16(p.ChannelCount),
nSamplesPerSec: uint32(p.SampleRate),
nAvgBytesPerSec: uint32(p.SampleRate * p.ChannelCount * 2),
nBlockAlign: uint16(p.ChannelCount * 2),
wBitsPerSample: 16,
}
ret, _, _ := waveInOpen.Call(
uintptr(unsafe.Pointer(&m.hWaveIn)),
WAVE_MAPPER,
uintptr(unsafe.Pointer(waveFmt)),
windows.NewCallback(m.cbWaveIn),
0,
CALLBACK_FUNCTION,
)
if err := errWinmm[ret]; err != nil {
return nil, err
}
for _, buf := range m.buf {
// Attach buffers to waveIn API.
ret, _, _ := waveInPrepareHeader.Call(
uintptr(unsafe.Pointer(m.hWaveIn)),
uintptr(unsafe.Pointer(&buf.waveHdr)),
uintptr(unsafe.Sizeof(buf.waveHdr)),
)
if err := errWinmm[ret]; err != nil {
return nil, err
}
}
for _, buf := range m.buf {
// Enqueue buffers.
ret, _, _ := waveInAddBuffer.Call(
uintptr(unsafe.Pointer(m.hWaveIn)),
uintptr(unsafe.Pointer(&buf.waveHdr)),
uintptr(unsafe.Sizeof(buf.waveHdr)),
)
if err := errWinmm[ret]; err != nil {
return nil, err
}
}
ret, _, _ = waveInStart.Call(
uintptr(unsafe.Pointer(m.hWaveIn)),
)
if err := errWinmm[ret]; err != nil {
return nil, err
}
// TODO: detect microphone device disconnection and return EOF
reader := audio.ReaderFunc(func() (wave.Audio, func(), error) {
b, ok := <-m.chBuf
if !ok {
return nil, func() {}, io.EOF
}
select {
case <-m.closed:
default:
// Re-enqueue used buffer.
ret, _, _ := waveInAddBuffer.Call(
uintptr(unsafe.Pointer(m.hWaveIn)),
uintptr(unsafe.Pointer(&b.waveHdr)),
uintptr(unsafe.Sizeof(b.waveHdr)),
)
if err := errWinmm[ret]; err != nil {
return nil, func() {}, err
}
}
a := wave.NewInt16Interleaved(
wave.ChunkInfo{
Channels: p.ChannelCount,
Len: (int(b.waveHdr.dwBytesRecorded) / 2) / p.ChannelCount,
SamplingRate: p.SampleRate,
},
)
j := 0
for i := 0; i < a.Size.Len; i++ {
for ch := 0; ch < a.Size.Channels; ch++ {
a.SetInt16(i, ch, wave.Int16Sample(b.data[j]))
j++
}
}
return a, func() {}, nil
})
return reader, nil
}
func (m *microphone) Properties() []prop.Media {
// TODO: Get actual properties
monoProp := prop.Media{
Audio: prop.Audio{
SampleRate: 48000,
Latency: time.Millisecond * 20,
ChannelCount: 1,
},
}
stereoProp := monoProp
stereoProp.ChannelCount = 2
return []prop.Media{monoProp, stereoProp}
}
// Windows API structures
type waveFormatEx struct {
wFormatTag uint16
nChannels uint16
nSamplesPerSec uint32
nAvgBytesPerSec uint32
nBlockAlign uint16
wBitsPerSample uint16
cbSize uint16
}
type waveHdr struct {
lpData uintptr
dwBufferLength uint32
dwBytesRecorded uint32
dwUser *uint32
dwFlags uint32
dwLoops uint32
lpNext *waveHdr
reserved *uint32
}
// Windows consts
const (
MMSYSERR_NOERROR = 0
MMSYSERR_ERROR = 1
MMSYSERR_BADDEVICEID = 2
MMSYSERR_NOTENABLED = 3
MMSYSERR_ALLOCATED = 4
MMSYSERR_INVALHANDLE = 5
MMSYSERR_NODRIVER = 6
MMSYSERR_NOMEM = 7
MMSYSERR_NOTSUPPORTED = 8
MMSYSERR_BADERRNUM = 9
MMSYSERR_INVALFLAG = 10
MMSYSERR_INVALPARAM = 11
MMSYSERR_HANDLEBUSY = 12
MMSYSERR_INVALIDALIAS = 13
MMSYSERR_BADDB = 14
MMSYSERR_KEYNOTFOUND = 15
MMSYSERR_READERROR = 16
MMSYSERR_WRITEERROR = 17
MMSYSERR_DELETEERROR = 18
MMSYSERR_VALNOTFOUND = 19
MMSYSERR_NODRIVERCB = 20
WAVERR_BADFORMAT = 32
WAVERR_STILLPLAYING = 33
WAVERR_UNPREPARED = 34
WAVERR_SYNC = 35
WAVE_MAPPER = 0xFFFF
WAVE_FORMAT_PCM = 1
CALLBACK_NULL = 0
CALLBACK_WINDOW = 0x10000
CALLBACK_TASK = 0x20000
CALLBACK_FUNCTION = 0x30000
CALLBACK_THREAD = CALLBACK_TASK
CALLBACK_EVENT = 0x50000
MM_WIM_OPEN = 0x3BE
MM_WIM_CLOSE = 0x3BF
MM_WIM_DATA = 0x3C0
)
var errWinmm = map[uintptr]error{
MMSYSERR_NOERROR: nil,
MMSYSERR_ERROR: errors.New("error"),
MMSYSERR_BADDEVICEID: errors.New("bad device id"),
MMSYSERR_NOTENABLED: errors.New("not enabled"),
MMSYSERR_ALLOCATED: errors.New("already allocated"),
MMSYSERR_INVALHANDLE: errors.New("invalid handler"),
MMSYSERR_NODRIVER: errors.New("no driver"),
MMSYSERR_NOMEM: errors.New("no memory"),
MMSYSERR_NOTSUPPORTED: errors.New("not supported"),
MMSYSERR_BADERRNUM: errors.New("band error number"),
MMSYSERR_INVALFLAG: errors.New("invalid flag"),
MMSYSERR_INVALPARAM: errors.New("invalid param"),
MMSYSERR_HANDLEBUSY: errors.New("handle busy"),
MMSYSERR_INVALIDALIAS: errors.New("invalid alias"),
MMSYSERR_BADDB: errors.New("bad db"),
MMSYSERR_KEYNOTFOUND: errors.New("key not found"),
MMSYSERR_READERROR: errors.New("read error"),
MMSYSERR_WRITEERROR: errors.New("write error"),
MMSYSERR_DELETEERROR: errors.New("delete error"),
MMSYSERR_VALNOTFOUND: errors.New("value not found"),
MMSYSERR_NODRIVERCB: errors.New("no driver cb"),
WAVERR_BADFORMAT: errors.New("bad format"),
WAVERR_STILLPLAYING: errors.New("still playing"),
WAVERR_UNPREPARED: errors.New("unprepared"),
WAVERR_SYNC: errors.New("sync"),
}

View File

@@ -32,7 +32,10 @@ func DetectChanges(interval time.Duration, onChange func(prop.Media)) TransformF
dirty = true
}
latency := time.Duration(chunk.ChunkInfo().Len) * time.Second / time.Nanosecond / time.Duration(currentProp.SampleRate)
var latency time.Duration
if currentProp.SampleRate != 0 {
latency = time.Duration(chunk.ChunkInfo().Len) * time.Second / time.Nanosecond / time.Duration(currentProp.SampleRate)
}
if currentProp.Latency != latency {
currentProp.Latency = latency
dirty = true

View File

@@ -42,10 +42,17 @@ func prettifyStruct(i interface{}) string {
value := obj.Field(i)
padding := strings.Repeat(" ", level)
if value.Kind() == reflect.Struct {
switch value.Kind() {
case reflect.Struct:
rows = append(rows, fmt.Sprintf("%s%v:", padding, field.Name))
addRows(level+1, value)
} else {
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
if value.IsNil() {
rows = append(rows, fmt.Sprintf("%s%v: any", padding, field.Name))
} else {
rows = append(rows, fmt.Sprintf("%s%v: %v", padding, field.Name, value))
}
default:
rows = append(rows, fmt.Sprintf("%s%v: %v", padding, field.Name, value))
}
}

View File

@@ -2,7 +2,6 @@ package mediadevices
import (
"errors"
"fmt"
"image"
"math/rand"
"sync"
@@ -123,8 +122,6 @@ func (track *baseTrack) bind(pc *webrtc.PeerConnection, encodedReader codec.Read
signalCh := make(chan chan<- struct{})
track.activePeerConnections[pc] = signalCh
fmt.Println("Binding")
go func() {
var doneCh chan<- struct{}
defer func() {
@@ -245,8 +242,6 @@ func (track *VideoTrack) Bind(pc *webrtc.PeerConnection) (*webrtc.Track, error)
}
wantCodecs := pc.GetRegisteredRTPCodecs(webrtc.RTPCodecTypeVideo)
fmt.Println(wantCodecs)
fmt.Println(&inputProp)
encodedReader, selectedCodec, err := track.selector.selectVideoCodec(wantCodecs, reader, inputProp)
if err != nil {
return nil, err
@@ -297,6 +292,9 @@ func newAudioTrackFromDriver(d driver.Driver, recorder driver.AudioRecorder, con
return nil, err
}
// FIXME: The current audio detection and audio encoder can only work with a static latency. Since the latency from the driver
// can fluctuate, we need to stabilize it. Maybe there's a better way for doing this?
reader = audio.NewBuffer(int(constraints.selectedMedia.Latency.Seconds() * float64(constraints.selectedMedia.SampleRate)))(reader)
return newAudioTrackFromReader(d, reader, selector), nil
}