path: root/src/conversation/gnunet_gst_test.c
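/*
 * gnunet_gst_test.c -- standalone test for the GStreamer-based
 * conversation media helper.  Depending on the compile-time role
 * (IS_SPEAKER or IS_MIC) it builds either a playback pipeline
 * (appsrc -> decoder -> audio sink) or a capture pipeline
 * (audio source -> encoder -> appsink) and runs it until killed.
 */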
#include "gnunet_gst_def.h"
#include "gnunet_gst.h"

int
main (int argc, char *argv[])
{
  struct GNUNET_gstData *gst;
  GstElement *gnunetsrc, *gnunetsink, *source, *sink, *encoder, *decoder;

  /* Allocate the shared helper state. */
  gst = (struct GNUNET_gstData *) malloc (sizeof (struct GNUNET_gstData));
  if (NULL == gst)
    return 1;

  /* Load audio backend, silence-dropping and RTP settings from the
   * configuration; the values could also be set by hand, e.g.:
   *   gst->audiobackend = JACK;
   *   gst->dropsilence = TRUE;
   *   gst->usertp = FALSE;
   */
  gg_load_configuration (gst);

  /* Initialize GStreamer */
  gst_init (&argc, &argv);

  gst->pipeline = GST_PIPELINE (gst_pipeline_new ("gnunet-media-helper"));

  /* The helper is built either as the speaker (playback) or the
   * microphone (capture) binary; the role is fixed at compile time. */
#if defined(IS_SPEAKER)
  int type = SPEAKER;
  printf ("this is the speaker\n");
#elif defined(IS_MIC)
  int type = MICROPHONE;
  printf ("this is the microphone\n");
#else
#error "Define IS_SPEAKER or IS_MIC to select the helper role."
#endif
  if (SPEAKER == type)
  {
    /* speaker: appsrc (audio from GNUnet) -> decoder -> audio output bin */
    gnunetsrc = GST_ELEMENT (get_app (gst, SOURCE));
    sink = GST_ELEMENT (get_audiobin (gst, SINK));
    decoder = GST_ELEMENT (get_coder (gst, DECODER));
    gst_bin_add_many (GST_BIN (gst->pipeline), gnunetsrc, decoder, sink, NULL);
    gst_element_link_many (gnunetsrc, decoder, sink, NULL);
  }
  if (MICROPHONE == type)
  {
    /* microphone: audio capture bin -> encoder -> appsink (audio to GNUnet) */
    source = GST_ELEMENT (get_audiobin (gst, SOURCE));
    encoder = GST_ELEMENT (get_coder (gst, ENCODER));
    gnunetsink = GST_ELEMENT (get_app (gst, SINK));
    gst_bin_add_many (GST_BIN (gst->pipeline), source, encoder, gnunetsink, NULL);
    gst_element_link_many (source, encoder, gnunetsink, NULL);
  }
  /* Dump the pipeline graph for debugging. */
  pl_graph (gst->pipeline);

  /* Start playing */
  gst_element_set_state (GST_ELEMENT (gst->pipeline), GST_STATE_PLAYING);

  /* Watch the pipeline bus for errors and EOS. */
  gg_setup_gst_bus (gst);

  /* Main loop: the microphone side is driven from the GLib main loop
   * (samples arrive via the appsink handler, cf. on_appsink_new_sample());
   * the speaker side keeps pulling incoming audio via gnunet_read(). */
  if (MICROPHONE == type)
  {
    GMainLoop *loop;

    loop = g_main_loop_new (NULL, FALSE);
    g_main_loop_run (loop);
  }
  if (SPEAKER == type)
  {
    while (1)
    {
      gnunet_read (gst);
    }
  }
  /* Not reached in normal operation; clean up anyway. */
  g_print ("Returned, stopping playback\n");

  gst_element_set_state (GST_ELEMENT (gst->pipeline), GST_STATE_NULL);
  gst_object_unref (gst->pipeline);
  free (gst);

  return 0;
}