libfreerdp-core: more TSG memory cleanup

Marc-André Moreau 2012-12-12 22:03:40 -05:00
parent 102abcbef2
commit 7ff2be09b2
16 changed files with 173 additions and 78 deletions

View File

@ -191,7 +191,6 @@ int rpc_ncacn_http_recv_out_channel_response(rdpRpc* rpc)
HttpResponse* http_response;
rdpNtlm* ntlm = rpc->NtlmHttpOut->ntlm;
printf("TlsOut: %p\n", rpc->TlsOut);
http_response = http_response_recv(rpc->TlsOut);
ntlm_token_data = NULL;

View File

@ -195,6 +195,12 @@ BOOL ntlm_authenticate(rdpNtlm* ntlm)
{
SECURITY_STATUS status;
if (ntlm->outputBuffer.pvBuffer)
{
free(ntlm->outputBuffer.pvBuffer);
ntlm->outputBuffer.pvBuffer = NULL;
}
ntlm->outputBufferDesc.ulVersion = SECBUFFER_VERSION;
ntlm->outputBufferDesc.cBuffers = 1;
ntlm->outputBufferDesc.pBuffers = &ntlm->outputBuffer;
@ -253,6 +259,12 @@ void ntlm_client_uninit(rdpNtlm* ntlm)
free(ntlm->identity.Password);
free(ntlm->ServicePrincipalName);
if (ntlm->outputBuffer.pvBuffer)
{
free(ntlm->outputBuffer.pvBuffer);
ntlm->outputBuffer.pvBuffer = NULL;
}
ntlm->table->FreeCredentialsHandle(&ntlm->credentials);
ntlm->table->FreeContextBuffer(ntlm->pPackageInfo);
ntlm->table->DeleteSecurityContext(&ntlm->context);
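
Both ntlm hunks add the same guard: any output token left over from a previous authentication leg is freed and the pointer reset before the buffer is reused or the security context is torn down, instead of leaking it. A minimal sketch of that free-and-reset pattern, using a hypothetical sec_buffer_t in place of the real SecBuffer:

#include <stdlib.h>

/* Hypothetical stand-in for the SSPI SecBuffer used by rdpNtlm. */
typedef struct
{
	unsigned long cbBuffer;
	void* pvBuffer;
} sec_buffer_t;

/* Free any token left over from a previous leg and reset the pointer,
 * so the next call can safely allocate a fresh buffer. */
static void sec_buffer_reset(sec_buffer_t* buffer)
{
	if (buffer->pvBuffer)
	{
		free(buffer->pvBuffer);
		buffer->pvBuffer = NULL;
		buffer->cbBuffer = 0;
	}
}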

View File

@ -552,11 +552,6 @@ rdpRpc* rpc_new(rdpTransport* transport)
rpc->max_xmit_frag = 0x0FF8;
rpc->max_recv_frag = 0x0FF8;
rpc->pdu = (RPC_PDU*) malloc(sizeof(RPC_PDU));
rpc->pdu->s = Stream_New(NULL, rpc->max_recv_frag);
rpc->RecvFrag = Stream_New(NULL, rpc->max_recv_frag);
rpc->ReceiveWindow = 0x00010000;
rpc->ChannelLifetime = 0x40000000;
@ -599,8 +594,6 @@ void rpc_free(rdpRpc* rpc)
ntlm_http_free(rpc->NtlmHttpIn);
ntlm_http_free(rpc->NtlmHttpOut);
free(rpc->pdu);
rpc_client_virtual_connection_free(rpc->VirtualConnection);
ArrayList_Clear(rpc->VirtualConnectionCookieTable);

View File

@ -701,9 +701,11 @@ struct rpc_client
wQueue* SendQueue;
RPC_PDU* pdu;
wQueue* ReceivePool;
wQueue* ReceiveQueue;
wStream* RecvFrag;
wQueue* FragmentPool;
wQueue* FragmentQueue;
@ -737,8 +739,6 @@ struct rdp_rpc
UINT32 CallId;
UINT32 PipeCallId;
RPC_PDU* pdu;
UINT32 StubCallId;
UINT32 StubFragCount;
@ -749,8 +749,6 @@ struct rdp_rpc
UINT16 max_xmit_frag;
UINT16 max_recv_frag;
wStream* RecvFrag;
UINT32 ReceiveWindow;
UINT32 ChannelLifetime;
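
The header change moves the in-progress PDU and the current receive fragment out of the connection-wide rdp_rpc structure and into rpc_client, so the receive-side scratch state lives next to the pools and queues that produce and consume it. A rough sketch of the resulting ownership, with simplified stand-in types for RPC_PDU, wStream and wQueue:

/* Simplified stand-ins for the real FreeRDP types. */
typedef struct rpc_pdu rpc_pdu_t;
typedef struct stream stream_t;
typedef struct queue queue_t;

/* After this commit: all receive-side scratch state is owned by the
 * client object, alongside the pools and queues it is exchanged with. */
struct rpc_client_sketch
{
	queue_t* ReceivePool;    /* recycled RPC_PDU objects            */
	queue_t* ReceiveQueue;   /* completed PDUs handed to the caller */
	rpc_pdu_t* pdu;          /* PDU currently being reassembled     */
	queue_t* FragmentPool;   /* recycled fragment streams           */
	queue_t* FragmentQueue;  /* fragments waiting to be parsed      */
	stream_t* RecvFrag;      /* fragment currently being read       */
};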

View File

@ -93,6 +93,7 @@ int rpc_send_bind_pdu(rdpRpc* rpc)
BYTE* buffer;
UINT32 offset;
UINT32 length;
RpcClientCall* clientCall;
p_cont_elem_t* p_cont_elem;
rpcconn_bind_hdr_t* bind_pdu;
rdpSettings* settings = rpc->settings;
@ -178,14 +179,17 @@ int rpc_send_bind_pdu(rdpRpc* rpc)
CopyMemory(&buffer[offset + 8], bind_pdu->auth_verifier.auth_value, bind_pdu->auth_length);
offset += (8 + bind_pdu->auth_length);
rpc_in_write(rpc, buffer, bind_pdu->frag_length);
length = bind_pdu->frag_length;
clientCall = rpc_client_call_new(bind_pdu->call_id, 0);
ArrayList_Add(rpc->client->ClientCallList, clientCall);
rpc_send_enqueue_pdu(rpc, buffer, length);
free(bind_pdu->p_context_elem.p_cont_elem[0].transfer_syntaxes);
free(bind_pdu->p_context_elem.p_cont_elem[1].transfer_syntaxes);
free(bind_pdu->p_context_elem.p_cont_elem);
free(bind_pdu);
free(buffer);
return length;
}
@ -246,6 +250,7 @@ int rpc_send_rpc_auth_3_pdu(rdpRpc* rpc)
BYTE* buffer;
UINT32 offset;
UINT32 length;
RpcClientCall* clientCall;
rpcconn_rpc_auth_3_hdr_t* auth_3_pdu;
DEBUG_RPC("Sending rpc_auth_3 PDU");
@ -289,11 +294,14 @@ int rpc_send_rpc_auth_3_pdu(rdpRpc* rpc)
CopyMemory(&buffer[offset + 8], auth_3_pdu->auth_verifier.auth_value, auth_3_pdu->auth_length);
offset += (8 + auth_3_pdu->auth_length);
rpc_in_write(rpc, buffer, auth_3_pdu->frag_length);
length = auth_3_pdu->frag_length;
clientCall = rpc_client_call_new(auth_3_pdu->call_id, 0);
ArrayList_Add(rpc->client->ClientCallList, clientCall);
rpc_send_enqueue_pdu(rpc, buffer, length);
free(auth_3_pdu);
free(buffer);
return length;
}
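
Both senders now register the outgoing call in the client's call list, enqueue the encoded PDU for asynchronous transmission, and return the fragment length rather than writing the buffer to the IN channel directly. A minimal sketch of that enqueue-and-track step, under the assumption of hypothetical call_list_add() and send_queue_push() helpers standing in for ArrayList_Add() and rpc_send_enqueue_pdu():

#include <stdint.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the real call-tracking and send-queue APIs. */
typedef struct { uint32_t CallId; uint32_t OpNum; } client_call_t;

extern void call_list_add(void* list, client_call_t* call);
extern void send_queue_push(void* queue, uint8_t* buffer, uint32_t length);

/* Register the outgoing call, enqueue the encoded PDU, and report the
 * fragment length back to the caller. */
static int send_tracked_pdu(void* call_list, void* send_queue,
                            uint8_t* buffer, uint32_t frag_length, uint32_t call_id)
{
	client_call_t* call = calloc(1, sizeof(client_call_t));

	if (!call)
		return -1;

	call->CallId = call_id;
	call->OpNum = 0;

	call_list_add(call_list, call);                    /* track the pending call */
	send_queue_push(send_queue, buffer, frag_length);  /* asynchronous transmit  */

	return (int) frag_length;                          /* bytes queued           */
}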

View File

@ -90,6 +90,9 @@ int rpc_client_on_fragment_received_event(rdpRpc* rpc)
wStream* fragment;
rpcconn_hdr_t* header;
if (!rpc->client->pdu)
rpc->client->pdu = rpc_client_receive_pool_take(rpc);
fragment = Queue_Dequeue(rpc->client->FragmentQueue);
buffer = (BYTE*) Stream_Buffer(fragment);
@ -97,18 +100,17 @@ int rpc_client_on_fragment_received_event(rdpRpc* rpc)
if (rpc->State < RPC_CLIENT_STATE_CONTEXT_NEGOTIATED)
{
rpc->pdu->Flags = 0;
rpc->pdu->CallId = header->common.call_id;
rpc->client->pdu->Flags = 0;
rpc->client->pdu->CallId = header->common.call_id;
Stream_EnsureCapacity(rpc->pdu->s, Stream_Length(fragment));
Stream_Write(rpc->pdu->s, buffer, Stream_Length(fragment));
Stream_Length(rpc->pdu->s) = Stream_Position(rpc->pdu->s);
Stream_EnsureCapacity(rpc->client->pdu->s, Stream_Length(fragment));
Stream_Write(rpc->client->pdu->s, buffer, Stream_Length(fragment));
Stream_Length(rpc->client->pdu->s) = Stream_Position(rpc->client->pdu->s);
rpc_client_fragment_pool_return(rpc, fragment);
Queue_Enqueue(rpc->client->ReceiveQueue, rpc->pdu);
rpc->pdu = rpc_client_receive_pool_take(rpc);
Queue_Enqueue(rpc->client->ReceiveQueue, rpc->client->pdu);
rpc->client->pdu = NULL;
return 0;
}
@ -157,7 +159,7 @@ int rpc_client_on_fragment_received_event(rdpRpc* rpc)
return 0;
}
Stream_EnsureCapacity(rpc->pdu->s, header->response.alloc_hint);
Stream_EnsureCapacity(rpc->client->pdu->s, header->response.alloc_hint);
buffer = (BYTE*) Stream_Buffer(fragment);
header = (rpcconn_hdr_t*) Stream_Buffer(fragment);
@ -170,7 +172,7 @@ int rpc_client_on_fragment_received_event(rdpRpc* rpc)
rpc->StubCallId, header->common.call_id, rpc->StubFragCount);
}
Stream_Write(rpc->pdu->s, &buffer[StubOffset], StubLength);
Stream_Write(rpc->client->pdu->s, &buffer[StubOffset], StubLength);
rpc->StubFragCount++;
rpc_client_fragment_pool_return(rpc, fragment);
@ -192,13 +194,13 @@ int rpc_client_on_fragment_received_event(rdpRpc* rpc)
rpc->StubCallId = 0;
rpc->StubFragCount = 0;
rpc->pdu->Flags = RPC_PDU_FLAG_STUB;
rpc->pdu->CallId = rpc->StubCallId;
rpc->client->pdu->Flags = RPC_PDU_FLAG_STUB;
rpc->client->pdu->CallId = rpc->StubCallId;
Stream_Length(rpc->pdu->s) = Stream_Position(rpc->pdu->s);
Stream_Length(rpc->client->pdu->s) = Stream_Position(rpc->client->pdu->s);
Queue_Enqueue(rpc->client->ReceiveQueue, rpc->pdu);
rpc->pdu = rpc_client_receive_pool_take(rpc);
Queue_Enqueue(rpc->client->ReceiveQueue, rpc->client->pdu);
rpc->client->pdu = NULL;
return 0;
}
@ -212,12 +214,15 @@ int rpc_client_on_read_event(rdpRpc* rpc)
int status = -1;
rpcconn_common_hdr_t* header;
position = Stream_Position(rpc->RecvFrag);
if (!rpc->client->RecvFrag)
rpc->client->RecvFrag = rpc_client_fragment_pool_take(rpc);
if (Stream_Position(rpc->RecvFrag) < RPC_COMMON_FIELDS_LENGTH)
position = Stream_Position(rpc->client->RecvFrag);
if (Stream_Position(rpc->client->RecvFrag) < RPC_COMMON_FIELDS_LENGTH)
{
status = rpc_out_read(rpc, Stream_Pointer(rpc->RecvFrag),
RPC_COMMON_FIELDS_LENGTH - Stream_Position(rpc->RecvFrag));
status = rpc_out_read(rpc, Stream_Pointer(rpc->client->RecvFrag),
RPC_COMMON_FIELDS_LENGTH - Stream_Position(rpc->client->RecvFrag));
if (status < 0)
{
@ -225,12 +230,12 @@ int rpc_client_on_read_event(rdpRpc* rpc)
return -1;
}
Stream_Seek(rpc->RecvFrag, status);
Stream_Seek(rpc->client->RecvFrag, status);
}
if (Stream_Position(rpc->RecvFrag) >= RPC_COMMON_FIELDS_LENGTH)
if (Stream_Position(rpc->client->RecvFrag) >= RPC_COMMON_FIELDS_LENGTH)
{
header = (rpcconn_common_hdr_t*) Stream_Buffer(rpc->RecvFrag);
header = (rpcconn_common_hdr_t*) Stream_Buffer(rpc->client->RecvFrag);
if (header->frag_length > rpc->max_recv_frag)
{
@ -239,10 +244,10 @@ int rpc_client_on_read_event(rdpRpc* rpc)
return -1;
}
if (Stream_Position(rpc->RecvFrag) < header->frag_length)
if (Stream_Position(rpc->client->RecvFrag) < header->frag_length)
{
status = rpc_out_read(rpc, Stream_Pointer(rpc->RecvFrag),
header->frag_length - Stream_Position(rpc->RecvFrag));
status = rpc_out_read(rpc, Stream_Pointer(rpc->client->RecvFrag),
header->frag_length - Stream_Position(rpc->client->RecvFrag));
if (status < 0)
{
@ -250,7 +255,7 @@ int rpc_client_on_read_event(rdpRpc* rpc)
return -1;
}
Stream_Seek(rpc->RecvFrag, status);
Stream_Seek(rpc->client->RecvFrag, status);
}
}
else
@ -261,17 +266,17 @@ int rpc_client_on_read_event(rdpRpc* rpc)
if (status < 0)
return -1;
status = Stream_Position(rpc->RecvFrag) - position;
status = Stream_Position(rpc->client->RecvFrag) - position;
if (Stream_Position(rpc->RecvFrag) >= header->frag_length)
if (Stream_Position(rpc->client->RecvFrag) >= header->frag_length)
{
/* complete fragment received */
Stream_Length(rpc->RecvFrag) = Stream_Position(rpc->RecvFrag);
Stream_SetPosition(rpc->RecvFrag, 0);
Stream_Length(rpc->client->RecvFrag) = Stream_Position(rpc->client->RecvFrag);
Stream_SetPosition(rpc->client->RecvFrag, 0);
Queue_Enqueue(rpc->client->FragmentQueue, rpc->RecvFrag);
rpc->RecvFrag = rpc_client_fragment_pool_take(rpc);
Queue_Enqueue(rpc->client->FragmentQueue, rpc->client->RecvFrag);
rpc->client->RecvFrag = NULL;
rpc_client_on_fragment_received_event(rpc);
}
@ -375,8 +380,12 @@ int rpc_send_dequeue_pdu(rdpRpc* rpc)
* Implementations of this protocol MUST NOT include them when computing any of the variables
* specified by this abstract data model.
*/
rpc->VirtualConnection->DefaultInChannel->BytesSent += status;
rpc->VirtualConnection->DefaultInChannel->SenderAvailableWindow -= status;
if (header->ptype == PTYPE_REQUEST)
{
rpc->VirtualConnection->DefaultInChannel->BytesSent += status;
rpc->VirtualConnection->DefaultInChannel->SenderAvailableWindow -= status;
}
Stream_Free(pdu->s, TRUE);
free(pdu);
@ -478,11 +487,13 @@ int rpc_client_new(rdpRpc* rpc)
client->SendQueue = Queue_New(TRUE, -1, -1);
Queue_Object(client->SendQueue)->fnObjectFree = (OBJECT_FREE_FN) rpc_pdu_free;
client->pdu = NULL;
client->ReceivePool = Queue_New(TRUE, -1, -1);
client->ReceiveQueue = Queue_New(TRUE, -1, -1);
Queue_Object(client->ReceivePool)->fnObjectFree = (OBJECT_FREE_FN) rpc_pdu_free;
Queue_Object(client->ReceiveQueue)->fnObjectFree = (OBJECT_FREE_FN) rpc_pdu_free;
client->RecvFrag = NULL;
client->FragmentPool = Queue_New(TRUE, -1, -1);
client->FragmentQueue = Queue_New(TRUE, -1, -1);
@ -525,21 +536,27 @@ int rpc_client_free(rdpRpc* rpc)
if (client)
{
Queue_Clear(client->SendQueue);
Queue_Free(client->SendQueue);
Queue_Clear(client->ReceivePool);
Queue_Free(client->ReceivePool);
if (client->RecvFrag)
rpc_fragment_free(client->RecvFrag);
Queue_Clear(client->ReceiveQueue);
Queue_Free(client->FragmentPool);
Queue_Free(client->FragmentQueue);
if (client->pdu)
rpc_pdu_free(client->pdu);
Queue_Free(client->ReceivePool);
Queue_Free(client->ReceiveQueue);
ArrayList_Clear(client->ClientCallList);
ArrayList_Free(client->ClientCallList);
CloseHandle(client->StopEvent);
CloseHandle(client->PduSentEvent);
CloseHandle(client->Thread);
free(client);
}
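
The receive path now takes its working objects lazily: the fragment stream and the PDU under reassembly are pulled from their pools on first use, and once pushed onto the fragment or receive queue the client pointer is set back to NULL, so ownership travels with the object and rpc_client_free only has to release whatever is still held. A compact sketch of that take, fill and hand-off cycle, with hypothetical pool and queue types:

#include <stddef.h>

/* Hypothetical object pool standing in for FragmentPool/ReceivePool. */
typedef struct { void* (*take)(void); void (*give)(void*); } pool_t;
typedef struct { void* items[64]; size_t count; } out_queue_t;

static void out_queue_push(out_queue_t* q, void* item)
{
	if (q->count < 64)
		q->items[q->count++] = item;
}

/* Lazily take a working object, and once it is complete, enqueue it and
 * drop the local reference so the consumer now owns it (mirrors how
 * client->RecvFrag and client->pdu are handled after this commit). */
static void hand_off_when_complete(pool_t* pool, out_queue_t* queue,
                                   void** working, int complete)
{
	if (!*working)
		*working = pool->take();  /* allocate only when actually reading */

	if (complete)
	{
		out_queue_push(queue, *working);
		*working = NULL;          /* consumer frees or recycles it */
	}
}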

View File

@ -698,6 +698,7 @@ int rts_recv_CONN_A3_pdu(rdpRpc* rpc, BYTE* buffer, UINT32 length)
int rts_send_CONN_B1_pdu(rdpRpc* rpc)
{
BYTE* buffer;
UINT32 length;
rpcconn_rts_hdr_t header;
BYTE* INChannelCookie;
BYTE* AssociationGroupId;
@ -727,7 +728,9 @@ int rts_send_CONN_B1_pdu(rdpRpc* rpc)
rts_client_keepalive_command_write(&buffer[76], rpc->KeepAliveInterval); /* ClientKeepalive (8 bytes) */
rts_association_group_id_command_write(&buffer[84], AssociationGroupId); /* AssociationGroupId (20 bytes) */
rpc_in_write(rpc, buffer, header.frag_length);
length = header.frag_length;
rpc_in_write(rpc, buffer, length);
free(buffer);
@ -766,6 +769,7 @@ int rts_recv_CONN_C2_pdu(rdpRpc* rpc, BYTE* buffer, UINT32 length)
int rts_send_keep_alive_pdu(rdpRpc* rpc)
{
BYTE* buffer;
UINT32 length;
rpcconn_rts_hdr_t header;
rts_pdu_header_init(&header);
@ -779,16 +783,18 @@ int rts_send_keep_alive_pdu(rdpRpc* rpc)
CopyMemory(buffer, ((BYTE*) &header), 20); /* RTS Header (20 bytes) */
rts_client_keepalive_command_write(&buffer[20], rpc->CurrentKeepAliveInterval); /* ClientKeepAlive (8 bytes) */
rpc_in_write(rpc, buffer, header.frag_length);
length = header.frag_length;
rpc_in_write(rpc, buffer, length);
free(buffer);
return 0;
return length;
}
int rts_send_flow_control_ack_pdu(rdpRpc* rpc)
{
BYTE* buffer;
UINT32 length;
rpcconn_rts_hdr_t header;
UINT32 BytesReceived;
UINT32 AvailableWindow;
@ -816,8 +822,9 @@ int rts_send_flow_control_ack_pdu(rdpRpc* rpc)
/* FlowControlAck Command (28 bytes) */
rts_flow_control_ack_command_write(&buffer[28], BytesReceived, AvailableWindow, ChannelCookie);
rpc_in_write(rpc, buffer, header.frag_length);
length = header.frag_length;
rpc_in_write(rpc, buffer, length);
free(buffer);
return 0;
@ -870,6 +877,7 @@ int rts_recv_flow_control_ack_with_destination_pdu(rdpRpc* rpc, BYTE* buffer, UI
int rts_send_ping_pdu(rdpRpc* rpc)
{
BYTE* buffer;
UINT32 length;
rpcconn_rts_hdr_t header;
rts_pdu_header_init(&header);
@ -883,11 +891,12 @@ int rts_send_ping_pdu(rdpRpc* rpc)
CopyMemory(buffer, ((BYTE*) &header), 20); /* RTS Header (20 bytes) */
rpc_in_write(rpc, buffer, header.frag_length);
length = header.frag_length;
rpc_in_write(rpc, buffer, length);
free(buffer);
return 0;
return length;
}
int rts_command_length(rdpRpc* rpc, UINT32 CommandType, BYTE* buffer, UINT32 length)
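
Each RTS sender now keeps the fragment length in a local variable, writes exactly that many bytes, and returns the length instead of 0, so callers can see how much was queued. A minimal sketch of the pattern, with a hypothetical write_channel() standing in for rpc_in_write():

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical 20-byte RTS-style header; only frag_length matters here. */
typedef struct { uint16_t frag_length; uint8_t reserved[18]; } rts_hdr_t;

extern int write_channel(const uint8_t* data, uint32_t length);

/* Encode a header-only PDU and report the number of bytes written,
 * mirroring the "length = header.frag_length; ... return length;" change. */
static int send_rts_pdu(const rts_hdr_t* header)
{
	uint32_t length = header->frag_length;   /* total PDU size from the header */
	uint8_t* buffer;

	if (length < sizeof(rts_hdr_t))
		return -1;

	buffer = calloc(length, 1);

	if (!buffer)
		return -1;

	memcpy(buffer, header, sizeof(rts_hdr_t));  /* header; commands would follow */
	write_channel(buffer, length);

	free(buffer);
	return (int) length;                        /* was "return 0" before the change */
}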

View File

@ -632,7 +632,14 @@ void transport_free(rdpTransport* transport)
if (transport->TlsIn)
tls_free(transport->TlsIn);
if (transport->TlsOut != transport->TlsIn)
tls_free(transport->TlsOut);
tcp_free(transport->TcpIn);
if (transport->TcpOut != transport->TcpIn)
tcp_free(transport->TcpOut);
tsg_free(transport->tsg);
free(transport);
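
transport_free now also releases the TCP transports and the TSG context, using the same guard as the TLS pair so a shared in/out object is only freed once. A small sketch of that shared-object guard with a hypothetical channel_t:

#include <stdlib.h>

typedef struct { int fd; } channel_t;

static void channel_free(channel_t* c)
{
	free(c);
}

/* Free both directions, but only once when they point at the same object
 * (the pattern transport_free uses for TlsIn/TlsOut and TcpIn/TcpOut). */
static void free_channel_pair(channel_t* in, channel_t* out)
{
	channel_free(in);

	if (out != in)
		channel_free(out);
}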

View File

@ -26,6 +26,7 @@
#ifndef _WIN32
#include "../synch/synch.h"
#include "../thread/thread.h"
#ifdef HAVE_UNISTD_H
#include <unistd.h>
@ -41,6 +42,12 @@ BOOL CloseHandle(HANDLE hObject)
if (Type == HANDLE_TYPE_THREAD)
{
WINPR_THREAD* thread;
thread = (WINPR_THREAD*) Object;
free(thread);
return TRUE;
}
else if (Type == HANDLE_TYPE_MUTEX)

View File

@ -18,6 +18,8 @@
set(MODULE_NAME "winpr-synch")
set(MODULE_PREFIX "WINPR_SYNCH")
include_directories(../thread)
set(${MODULE_PREFIX}_SRCS
address.c
barrier.c
@ -65,3 +67,4 @@ else()
endif()
set_property(TARGET ${MODULE_NAME} PROPERTY FOLDER "WinPR")

View File

@ -29,6 +29,7 @@
#include <winpr/synch.h>
#include "synch.h"
#include "thread.h"
/**
* WaitForSingleObject
@ -49,10 +50,14 @@ DWORD WaitForSingleObject(HANDLE hHandle, DWORD dwMilliseconds)
if (Type == HANDLE_TYPE_THREAD)
{
WINPR_THREAD* thread;
if (dwMilliseconds != INFINITE)
printf("WaitForSingleObject: timeout not implemented for thread wait\n");
pthread_join((pthread_t) Object, NULL);
thread = (WINPR_THREAD*) Object;
pthread_join(thread->thread, NULL);
}
if (Type == HANDLE_TYPE_MUTEX)
{
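
With the thread structure visible through the new private header, waiting on a thread handle casts the object back to WINPR_THREAD and joins its pthread_t member, rather than treating the handle itself as a pthread_t. A self-contained sketch of that join, using a hypothetical my_thread_t in place of the real structure:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for the private WINPR_THREAD structure. */
typedef struct
{
	int started;
	pthread_t thread;
} my_thread_t;

/* Wait for the thread to finish; timeouts are not supported here,
 * matching the printf warning in the hunk above. */
static int wait_for_thread(void* object, unsigned long milliseconds)
{
	my_thread_t* thread = (my_thread_t*) object;

	if (milliseconds != (unsigned long) -1)
		printf("wait_for_thread: timeout not implemented\n");

	return pthread_join(thread->thread, NULL);
}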

View File

@ -22,6 +22,7 @@ set(${MODULE_PREFIX}_SRCS
process.c
processor.c
thread.c
thread.h
tls.c)
if(MSVC AND (NOT MONOLITHIC_BUILD))

View File

@ -69,27 +69,13 @@
#include <winpr/crt.h>
#include <pthread.h>
#include "thread.h"
/**
* TODO: implement thread suspend/resume using pthreads
* http://stackoverflow.com/questions/3140867/suspend-pthreads-without-using-condition
*/
typedef void *(*pthread_start_routine)(void*);
struct winpr_thread
{
BOOL started;
pthread_t thread;
SIZE_T dwStackSize;
LPVOID lpParameter;
pthread_mutex_t mutex;
LPTHREAD_START_ROUTINE lpStartAddress;
LPSECURITY_ATTRIBUTES lpThreadAttributes;
};
typedef struct winpr_thread WINPR_THREAD;
void winpr_StartThread(WINPR_THREAD* thread)
{
pthread_attr_t attr;

View File

@ -0,0 +1,46 @@
/**
* WinPR: Windows Portable Runtime
* Process Thread Functions
*
* Copyright 2012 Marc-Andre Moreau <marcandre.moreau@gmail.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef WINPR_THREAD_PRIVATE_H
#define WINPR_THREAD_PRIVATE_H
#ifndef _WIN32
#include <winpr/thread.h>
#include <pthread.h>
typedef void *(*pthread_start_routine)(void*);
struct winpr_thread
{
BOOL started;
pthread_t thread;
SIZE_T dwStackSize;
LPVOID lpParameter;
pthread_mutex_t mutex;
LPTHREAD_START_ROUTINE lpStartAddress;
LPSECURITY_ATTRIBUTES lpThreadAttributes;
};
typedef struct winpr_thread WINPR_THREAD;
#endif
#endif /* WINPR_THREAD_PRIVATE_H */

View File

@ -398,6 +398,8 @@ wArrayList* ArrayList_New(BOOL synchronized)
void ArrayList_Free(wArrayList* arrayList)
{
ArrayList_Clear(arrayList);
CloseHandle(arrayList->mutex);
free(arrayList->array);
free(arrayList);

View File

@ -85,7 +85,7 @@ void Queue_Clear(wQueue* queue)
if (queue->synchronized)
WaitForSingleObject(queue->mutex, INFINITE);
for (index = 0; index < queue->size; index++)
for (index = queue->head; index != queue->tail; index++)
{
if (queue->object.fnObjectFree)
queue->object.fnObjectFree(queue->array[index]);
@ -240,6 +240,8 @@ wQueue* Queue_New(BOOL synchronized, int capacity, int growthFactor)
void Queue_Free(wQueue* queue)
{
Queue_Clear(queue);
CloseHandle(queue->event);
CloseHandle(queue->mutex);
free(queue->array);
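
Queue_Clear used to walk indices 0 through size-1, which misses the live range of the circular buffer once head has advanced; the new loop walks from head to tail instead, and Queue_Free now closes the event and mutex before releasing the array. A sketch of clearing only the live entries of a ring buffer, with the wraparound step made explicit (an assumption here, since the hunk above does not show a modulo):

#include <stddef.h>

typedef struct
{
	void** array;
	size_t capacity;  /* assumed non-zero                */
	size_t head;      /* index of the oldest element     */
	size_t tail;      /* index one past the newest one   */
	size_t size;      /* number of live elements         */
	void (*free_fn)(void* item);
} ring_queue_t;

/* Release every live element between head and tail, wrapping at capacity,
 * then reset the queue to empty. */
static void ring_queue_clear(ring_queue_t* q)
{
	size_t index;

	for (index = q->head; index != q->tail; index = (index + 1) % q->capacity)
	{
		if (q->free_fn)
			q->free_fn(q->array[index]);

		q->array[index] = NULL;
	}

	q->size = 0;
	q->head = q->tail = 0;
}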